aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMilosz Wasilewski <milosz.wasilewski@linaro.org>2015-08-27 15:50:28 +0100
committerMilosz Wasilewski <milosz.wasilewski@linaro.org>2015-08-27 15:50:28 +0100
commitf6e097c8c6acfe4a47a12f5c929888ce81d2055f (patch)
tree08a2b2f4fc5b37a848cc412778a984d9e64ccdd4
Initial (and hopefully only) commitHEADmaster
Signed-off-by: Milosz Wasilewski <milosz.wasilewski@linaro.org>
-rw-r--r--Makefile81
-rw-r--r--Makefile.in81
-rw-r--r--README-2.0016
-rw-r--r--README.txt15
-rw-r--r--bon_add.cpp139
-rw-r--r--bon_csv2html.1113
-rw-r--r--bon_csv2html.1.in113
-rw-r--r--bon_csv2html.cpp506
-rw-r--r--bon_csv2txt145
-rw-r--r--bon_csv2txt.11
-rwxr-xr-xbon_csv2txt.in145
-rw-r--r--bon_file.cpp628
-rw-r--r--bon_file.h51
-rw-r--r--bon_io.cpp367
-rw-r--r--bon_io.h56
-rw-r--r--bon_suid.cpp84
-rw-r--r--bon_time.cpp423
-rw-r--r--bon_time.h73
-rw-r--r--bonnie++.8239
-rw-r--r--bonnie++.cpp809
-rw-r--r--bonnie++.spec48
-rw-r--r--bonnie++.spec.in48
-rw-r--r--bonnie.h69
-rw-r--r--bonnie.h.in69
l---------changelog.txt1
-rw-r--r--conf.h10
-rw-r--r--conf.h.in9
-rwxr-xr-xconfigure5294
-rw-r--r--configure.in126
-rw-r--r--copyright.txt19
-rw-r--r--credits.txt10
-rw-r--r--debian/changelog655
-rw-r--r--debian/changelog.txt380
-rw-r--r--debian/control21
-rw-r--r--debian/copyright12
-rw-r--r--debian/dirs2
-rw-r--r--debian/docs2
-rwxr-xr-xdebian/rules80
-rw-r--r--depends18
-rw-r--r--duration.cpp61
-rw-r--r--duration.h38
-rw-r--r--generate_randfile.141
-rw-r--r--generate_randfile.cpp58
-rw-r--r--getc_putc.8109
-rw-r--r--getc_putc.cpp284
-rw-r--r--getc_putc.h29
-rw-r--r--getc_putc_helper.cpp64
-rw-r--r--install.sh164
-rw-r--r--port.h74
-rw-r--r--port.h.in74
-rw-r--r--rand.cpp56
-rw-r--r--rand.h49
-rw-r--r--readme.html178
-rw-r--r--semaphore.cpp129
-rw-r--r--semaphore.h51
-rw-r--r--sh.common154
-rw-r--r--sun/Makefile17
-rw-r--r--sun/pkginfo9
-rw-r--r--sun/pkginfo.in9
-rw-r--r--sun/prototype16
-rw-r--r--sun/tmp-conv0
-rw-r--r--sync.cpp34
-rw-r--r--sync.h22
-rw-r--r--thread.cpp162
-rw-r--r--thread.h76
-rw-r--r--util.cpp24
-rw-r--r--zcav.8105
-rw-r--r--zcav.cpp287
-rw-r--r--zcav_io.cpp248
-rw-r--r--zcav_io.h66
70 files changed, 13646 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..4bb5103
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,81 @@
+EXES=bonnie++ zcav getc_putc getc_putc_helper
+EXE=bon_csv2html generate_randfile
+
+all: $(EXE) $(EXES)
+
+SCRIPTS=bon_csv2txt
+
+prefix=/usr/local
+eprefix=${prefix}
+#MORE_WARNINGS=-Weffc++
+WFLAGS=-Wall -W -Wshadow -Wpointer-arith -Wwrite-strings -pedantic -ffor-scope -Wcast-align -Wsign-compare -Wpointer-arith -Wwrite-strings -Wformat-security -Wswitch-enum -Winit-self $(MORE_WARNINGS)
+CFLAGS=-O2 -DNDEBUG $(WFLAGS) $(MORECFLAGS)
+CXX=g++ $(CFLAGS)
+LINK=g++
+THREAD_LFLAGS=-lpthread
+
+INSTALL=/usr/bin/install -c
+INSTALL_PROGRAM=${INSTALL}
+
+BONSRC=bonnie++.cpp bon_io.cpp bon_file.cpp bon_time.cpp semaphore.cpp \
+ sync.cpp thread.cpp bon_suid.cpp duration.cpp rand.o util.o
+BONOBJS=$(BONSRC:.cpp=.o)
+
+MAN1=bon_csv2html.1 bon_csv2txt.1 generate_randfile.1
+MAN8=bonnie++.8 zcav.8 getc_putc.8
+
+ZCAVSRC=zcav.cpp thread.cpp zcav_io.cpp bon_suid.cpp duration.cpp
+ZCAVOBJS=$(ZCAVSRC:.cpp=.o)
+
+GETCSRC=getc_putc.cpp bon_suid.cpp duration.cpp util.o
+GETCOBJS=$(GETCSRC:.cpp=.o)
+
+GETCHSRC=getc_putc_helper.cpp duration.cpp
+GETCHOBJS=$(GETCHSRC:.cpp=.o)
+
+bonnie++: $(BONOBJS)
+ $(LINK) -o bonnie++ $(BONOBJS) $(THREAD_LFLAGS)
+
+zcav: $(ZCAVOBJS)
+ $(LINK) -o zcav $(ZCAVOBJS) $(THREAD_LFLAGS)
+
+getc_putc: $(GETCOBJS) getc_putc_helper
+ $(LINK) -o getc_putc $(GETCOBJS) $(THREAD_LFLAGS)
+
+getc_putc_helper: $(GETCHOBJS)
+ $(CXX) -o getc_putc_helper $(GETCHOBJS)
+
+bon_csv2html: bon_csv2html.o
+ $(LINK) bon_csv2html.o -o bon_csv2html
+
+generate_randfile: generate_randfile.o
+ $(LINK) generate_randfile.o -o generate_randfile
+
+install-bin: $(EXE) $(EXES)
+ mkdir -p $(eprefix)/bin $(eprefix)/sbin
+ ${INSTALL} -s $(EXES) $(eprefix)/sbin
+ ${INSTALL} -s $(EXE) $(eprefix)/bin
+ ${INSTALL} $(SCRIPTS) $(eprefix)/bin
+
+install: install-bin
+ mkdir -p ${prefix}/share/man/man1 ${prefix}/share/man/man8
+ ${INSTALL} -m 644 $(MAN1) ${prefix}/share/man/man1
+ ${INSTALL} -m 644 $(MAN8) ${prefix}/share/man/man8
+
+%.o: %.cpp
+ $(CXX) -c $<
+
+clean:
+ rm -f $(EXE) $(EXES) *.o build-stamp install-stamp
+ rm -rf debian/tmp core debian/*.debhelper
+ rm -f debian/{substvars,files} config.log depends.bak
+
+realclean: clean
+ rm -f config.* Makefile bonnie++.spec port.h conf.h configure.lineno
+ rm -f bon_csv2txt bon_csv2html.1 sun/pkginfo bonnie.h
+
+dep:
+ makedepend -Y -f depends *.cpp 2> /dev/null
+
+include depends
+
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..4f14819
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,81 @@
+EXES=bonnie++ zcav getc_putc getc_putc_helper
+EXE=bon_csv2html generate_randfile
+
+all: $(EXE) $(EXES)
+
+SCRIPTS=bon_csv2txt
+
+prefix=@prefix@
+eprefix=@exec_prefix@
+#MORE_WARNINGS=-Weffc++
+WFLAGS=-Wall -W -Wshadow -Wpointer-arith -Wwrite-strings -pedantic -ffor-scope -Wcast-align -Wsign-compare -Wpointer-arith -Wwrite-strings -Wformat-security -Wswitch-enum -Winit-self $(MORE_WARNINGS)
+CFLAGS=-O2 @debug@ -DNDEBUG $(WFLAGS) $(MORECFLAGS)
+CXX=@CXX@ $(CFLAGS)
+LINK=@CXX@
+THREAD_LFLAGS=@thread_ldflags@
+
+INSTALL=@INSTALL@
+INSTALL_PROGRAM=@INSTALL_PROGRAM@
+
+BONSRC=bonnie++.cpp bon_io.cpp bon_file.cpp bon_time.cpp semaphore.cpp \
+ sync.cpp thread.cpp bon_suid.cpp duration.cpp rand.o util.o
+BONOBJS=$(BONSRC:.cpp=.o)
+
+MAN1=bon_csv2html.1 bon_csv2txt.1 generate_randfile.1
+MAN8=bonnie++.8 zcav.8 getc_putc.8
+
+ZCAVSRC=zcav.cpp thread.cpp zcav_io.cpp bon_suid.cpp duration.cpp
+ZCAVOBJS=$(ZCAVSRC:.cpp=.o)
+
+GETCSRC=getc_putc.cpp bon_suid.cpp duration.cpp util.o
+GETCOBJS=$(GETCSRC:.cpp=.o)
+
+GETCHSRC=getc_putc_helper.cpp duration.cpp
+GETCHOBJS=$(GETCHSRC:.cpp=.o)
+
+bonnie++: $(BONOBJS)
+ $(LINK) -o bonnie++ $(BONOBJS) $(THREAD_LFLAGS)
+
+zcav: $(ZCAVOBJS)
+ $(LINK) -o zcav $(ZCAVOBJS) $(THREAD_LFLAGS)
+
+getc_putc: $(GETCOBJS) getc_putc_helper
+ $(LINK) -o getc_putc $(GETCOBJS) $(THREAD_LFLAGS)
+
+getc_putc_helper: $(GETCHOBJS)
+ $(CXX) -o getc_putc_helper $(GETCHOBJS)
+
+bon_csv2html: bon_csv2html.o
+ $(LINK) bon_csv2html.o -o bon_csv2html
+
+generate_randfile: generate_randfile.o
+ $(LINK) generate_randfile.o -o generate_randfile
+
+install-bin: $(EXE) $(EXES)
+ mkdir -p $(eprefix)/bin $(eprefix)/sbin
+ @INSTALL_PROGRAM@ @stripping@ $(EXES) $(eprefix)/sbin
+ @INSTALL_PROGRAM@ @stripping@ $(EXE) $(eprefix)/bin
+ @INSTALL_SCRIPT@ $(SCRIPTS) $(eprefix)/bin
+
+install: install-bin
+ mkdir -p @mandir@/man1 @mandir@/man8
+ @INSTALL_DATA@ $(MAN1) @mandir@/man1
+ @INSTALL_DATA@ $(MAN8) @mandir@/man8
+
+%.o: %.cpp
+ $(CXX) -c $<
+
+clean:
+ rm -f $(EXE) $(EXES) *.o build-stamp install-stamp
+ rm -rf debian/tmp core debian/*.debhelper
+ rm -f debian/{substvars,files} config.log depends.bak
+
+realclean: clean
+ rm -f config.* Makefile bonnie++.spec port.h conf.h configure.lineno
+ rm -f bon_csv2txt bon_csv2html.1 sun/pkginfo bonnie.h
+
+dep:
+ makedepend -Y -f depends *.cpp 2> /dev/null
+
+include depends
+
diff --git a/README-2.00 b/README-2.00
new file mode 100644
index 0000000..e4221b0
--- /dev/null
+++ b/README-2.00
@@ -0,0 +1,16 @@
+Differences between Bonnie++ 1.00 and 2.00.
+
+Version 2.00 is totally threaded, this has many subtle impacts on the way the
+code works. This changed the per-char results so I decided to make it do
+per-byte tests using write() and read() instead. The results are now much
+less. From 1.92.
+
+When closing files for the IO tests the operation is to fsync() each file
+handle. This means on Linux and other OSs that aggressively cache writes the
+write performance will be noticeably less, but the results will be more
+accurate. From 1.90b.
+
+The number of seek processes is now 5 instead of 3. Now almost all new hard
+drives have some sort of seek reordering capability, and OSs are getting
+smarter about maintaining queues. This and the increasing popularity of RAID
+arrays requires more seek procs to do a reasonable test.
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..f86a97b
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,15 @@
+To build a Solaris package type "make -C sun".
+
+To build a Debian package type "fakeroot dpkg-buildpackage".
+
+To build a RPM type "SOMEONE TELL ME WHAT TO PUT HERE".
+
+To compile for any other Unix type "./configure" and then type "make" after
+the configure script successfully completes. If ./configure produces errors
+then send me your config.log file (don't bother trying to compile it until
+you have ./configure run correctly).
+
+Unix compilation currently requires GCC and GNU Make. You should be able to
+compile with a non-GCC compiler by running the following command:
+CFLAGS="..." make
+Where ... represents the compilation flags for your C++ compiler.
diff --git a/bon_add.cpp b/bon_add.cpp
new file mode 100644
index 0000000..b1c8445
--- /dev/null
+++ b/bon_add.cpp
@@ -0,0 +1,139 @@
+#include "bonnie.h"
+#include <stdio.h>
+#include <vector>
+#include <string.h>
+#include <math.h>
+
+// Maximum number of items expected on a csv line
+#define MAX_ITEMS 45
+typedef vector<PCCHAR> STR_VEC;
+
+vector<STR_VEC> data;
+typedef PCCHAR * PPCCHAR;
+PPCCHAR * props;
+
+// Splits a line of text (CSV format) by commas and adds it to the list to
+// process later. Doesn't keep any pointers to the buf...
+void read_in(CPCCHAR buf);
+// print line in the specified line from columns start..end as a line of a
+// HTML table
+void print_a_line(int num, int start, int end);
+// 0 means don't do colors, 1 means speed, 2 means CPU, 3 means latency
+const int vals[MAX_ITEMS] =
+ { 0,0,0,0,0,1,2,1,2,1,2,1,2,1,2,1,2,
+ 0,0,0,0,1,2,1,2,1,2,1,2,1,2,1,2,
+ 3,3,3,3,3,3,3,3,3,3,3,3 };
+
+void usage()
+{
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+ char buf[1024];
+
+ FILE *fp = NULL;
+ if(argc > 1)
+ {
+ fp = fopen(argv[1], "r");
+ if(!fp)
+ usage();
+ }
+ while(fgets(buf, sizeof(buf), fp ? fp : stdin))
+ {
+ buf[sizeof(buf) - 1] = '\0';
+ strtok(buf, "\r\n");
+ read_in(buf);
+ }
+
+ printf("%s", data[0][0]);
+ int i;
+ for(i = 1; i < MAX_ITEMS; i++)
+ {
+ switch(vals[i])
+ {
+ case 0:
+ printf(",%s", data[0][i]);
+ break;
+ case 1:
+ case 2:
+ {
+ int sum = 0, tmp = 0;
+ for(int j = 0; j < data.size(); j++)
+ {
+ if(sscanf(data[j][i], "%d", &tmp) != 1)
+ {
+ sum = 0;
+ j = data.size();
+ }
+ sum += tmp;
+ }
+ if(sum > 0)
+ printf(",%d", sum);
+ else
+ printf(",");
+ }
+ break;
+ case 3:
+ {
+ double max = 0.0;
+ int tmp = 0;
+ for(int j = 0; j < data.size(); j++)
+ {
+ if(sscanf(data[j][i], "%d", &tmp) != 1)
+ {
+ max = 0.0;
+ j = data.size();
+ }
+ double dtmp = double(tmp);
+ if(strstr(data[j][i], "ms"))
+ dtmp *= 1000.0;
+ else if(!strstr(data[j][i], "us"))
+ dtmp *= 1000000.0;
+ if(dtmp > max)
+ max = dtmp;
+ }
+ if(max > 99999999.0)
+ printf(",%ds", int(max / 1000000.0));
+ else if(max > 99999.0)
+ printf(",%dms", int(max / 1000.0));
+ else
+ printf(",%dus", int(max));
+ }
+ break;
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+STR_VEC split(CPCCHAR delim, CPCCHAR buf)
+{
+ STR_VEC arr;
+ char *tmp = strdup(buf);
+ while(1)
+ {
+ arr.push_back(tmp);
+ tmp = strstr(tmp, delim);
+ if(!tmp)
+ break;
+ *tmp = '\0';
+ tmp += strlen(delim);
+ }
+ return arr;
+}
+
+void read_in(CPCCHAR buf)
+{
+ STR_VEC arr = split(",", buf);
+ if(strcmp(arr[0], "2") )
+ {
+ fprintf(stderr, "Can't process: %s\n", buf);
+ free((void *)arr[0]);
+ return;
+ }
+
+ data.push_back(arr);
+}
+
diff --git a/bon_csv2html.1 b/bon_csv2html.1
new file mode 100644
index 0000000..ee02a98
--- /dev/null
+++ b/bon_csv2html.1
@@ -0,0 +1,113 @@
+.TH bon_csv2html 1
+.SH "NAME"
+.BR bon_csv2html
+program to convert CSV format Bonnie++ data to a HTML form
+using tables suitable for display on a web page. NB Lynx can't display this
+properly, and due to the size it probably requires 1024x768 monitor to display
+properly.
+.P
+.BR bon_csv2txt
+program to convert CSV format Bonnie++ data to plain-text format
+suitable for pasting into an email or reading on a Braille display.
+
+.SH "DESCRIPTION"
+They take CSV format (comma-delimited spreadsheet files AKA Comma Separated
+Values in MS land) data on standard input and produce HTML or plain text on
+standard output respectively.
+
+.SH "FORMAT"
+This is a list of the fields used in the CSV files format version 2. Format
+version 1 was the type used in Bonnie++ < 1.90.
+Before each field I list the field number as well as the name given in the heading
+.TP
+.B 0 format_version
+Version of the output format in use (1.97)
+.TP
+.B 1 bonnie_version
+(1.97)
+.TP
+.B 2 name
+Machine Name
+.TP
+.B 3 concurrency
+The number of copies of each operation to be run at the same time
+.TP
+.B 4 seed
+Random number seed
+.TP
+.B 5 file_size
+Size in megs for the IO tests
+.TP
+.B 6 chunk_size
+Size of chunks in bytes
+.TP
+.B 7 putc,putc_cpu
+Results for writing a character at a time K/s,%CPU
+.TP
+.B 9 put_block,put_block_cpu
+Results for writing a block at a time K/s,%CPU
+.TP
+.B 11 rewrite,rewrite_cpu
+Results for reading and re-writing a block at a time K/s,%CPU
+.TP
+.B 13 getc,getc_cpu
+Results for reading a character at a time K/s,%CPU
+.TP
+.B 15 get_block,get_block_cpu
+Results for reading a block at a time K/s,%CPU
+.TP
+.B 17 seeks,seeks_cpu
+Results for the seek test seeks/s,%CPU
+.TP
+.B 19 num_files
+Number of files for file-creation tests (units of 1024 files)
+.TP
+.B 20 max_size
+The maximum size of files for file-creation tests. Or the type of files for
+links.
+.TP
+.B 21 min_size
+The minimum size of files for file-creation tests.
+.TP
+.B 22 num_dirs
+The number of directories for creation of files in multiple directories.
+.TP
+.B 23 file_chunk_size
+The size of blocks for writing multiple files.
+.TP
+.B 24 seq_create,seq_create_cpu
+Rate of creating files sequentially files/s,%CPU
+.TP
+.B 26 seq_stat,seq_stat_cpu
+Rate of reading/stating files sequentially files/s,%CPU
+.TP
+.B 28 seq_del,seq_del_cpu
+Rate of deleting files sequentially files/s,%CPU
+.TP
+.B 30 ran_create,ran_create_cpu
+Rate of creating files in random order files/s,%CPU
+.TP
+.B 32 ran_stat,ran_stat_cpu
+Rate of reading/stating files in random order files/s,%CPU
+.TP
+.B 34 ran_del,ran_del_cpu
+Rate of deleting files in random order files/s,%CPU
+.TP
+.B 36 putc_latency,put_block_latency,rewrite_latency
+Latency (maximum amount of time for a single operation) for putc, put_block,
+and rewrite
+.TP
+.B 39 getc_latency,get_block_latency,seeks_latency
+Latency for getc, get_block, and seeks
+.TP
+.B 42 seq_create_latency,seq_stat_latency,seq_del_latency
+Latency for seq_create, seq_stat, and seq_del
+.TP
+.B 45 ran_create_latency,ran_stat_latency,ran_del_latency
+Latency for ran_create, ran_stat, and ran_del
+.P
+A string that starts with '#' is a comment.
+
+.SH "AUTHOR"
+These programs were written by Russell Coker <russell@coker.com.au>. May be
+freely used and distributed without restriction.
diff --git a/bon_csv2html.1.in b/bon_csv2html.1.in
new file mode 100644
index 0000000..7579217
--- /dev/null
+++ b/bon_csv2html.1.in
@@ -0,0 +1,113 @@
+.TH bon_csv2html 1
+.SH "NAME"
+.BR bon_csv2html
+program to convert CSV format Bonnie++ data to a HTML form
+using tables suitable for display on a web page. NB Lynx can't display this
+properly, and due to the size it probably requires 1024x768 monitor to display
+properly.
+.P
+.BR bon_csv2txt
+program to convert CSV format Bonnie++ data to plain-text format
+suitable for pasting into an email or reading on a Braille display.
+
+.SH "DESCRIPTION"
+They take CSV format (comma-delimited spreadsheet files AKA Comma Separated
+Values in MS land) data on standard input and produce HTML or plain text on
+standard output respectively.
+
+.SH "FORMAT"
+This is a list of the fields used in the CSV files format version 2. Format
+version 1 was the type used in Bonnie++ < 1.90.
+Before each field I list the field number as well as the name given in the heading
+.TP
+.B 0 format_version
+Version of the output format in use (@csv_version@)
+.TP
+.B 1 bonnie_version
+(@version@)
+.TP
+.B 2 name
+Machine Name
+.TP
+.B 3 concurrency
+The number of copies of each operation to be run at the same time
+.TP
+.B 4 seed
+Random number seed
+.TP
+.B 5 file_size
+Size in megs for the IO tests
+.TP
+.B 6 chunk_size
+Size of chunks in bytes
+.TP
+.B 7 putc,putc_cpu
+Results for writing a character at a time K/s,%CPU
+.TP
+.B 9 put_block,put_block_cpu
+Results for writing a block at a time K/s,%CPU
+.TP
+.B 11 rewrite,rewrite_cpu
+Results for reading and re-writing a block at a time K/s,%CPU
+.TP
+.B 13 getc,getc_cpu
+Results for reading a character at a time K/s,%CPU
+.TP
+.B 15 get_block,get_block_cpu
+Results for reading a block at a time K/s,%CPU
+.TP
+.B 17 seeks,seeks_cpu
+Results for the seek test seeks/s,%CPU
+.TP
+.B 19 num_files
+Number of files for file-creation tests (units of 1024 files)
+.TP
+.B 20 max_size
+The maximum size of files for file-creation tests. Or the type of files for
+links.
+.TP
+.B 21 min_size
+The minimum size of files for file-creation tests.
+.TP
+.B 22 num_dirs
+The number of directories for creation of files in multiple directories.
+.TP
+.B 23 file_chunk_size
+The size of blocks for writing multiple files.
+.TP
+.B 24 seq_create,seq_create_cpu
+Rate of creating files sequentially files/s,%CPU
+.TP
+.B 26 seq_stat,seq_stat_cpu
+Rate of reading/stating files sequentially files/s,%CPU
+.TP
+.B 28 seq_del,seq_del_cpu
+Rate of deleting files sequentially files/s,%CPU
+.TP
+.B 30 ran_create,ran_create_cpu
+Rate of creating files in random order files/s,%CPU
+.TP
+.B 32 ran_stat,ran_stat_cpu
+Rate of reading/stating files in random order files/s,%CPU
+.TP
+.B 34 ran_del,ran_del_cpu
+Rate of deleting files in random order files/s,%CPU
+.TP
+.B 36 putc_latency,put_block_latency,rewrite_latency
+Latency (maximum amount of time for a single operation) for putc, put_block,
+and rewrite
+.TP
+.B 39 getc_latency,get_block_latency,seeks_latency
+Latency for getc, get_block, and seeks
+.TP
+.B 42 seq_create_latency,seq_stat_latency,seq_del_latency
+Latency for seq_create, seq_stat, and seq_del
+.TP
+.B 45 ran_create_latency,ran_stat_latency,ran_del_latency
+Latency for ran_create, ran_stat, and ran_del
+.P
+A string that starts with '#' is a comment.
+
+.SH "AUTHOR"
+These programs were written by Russell Coker <russell@coker.com.au>. May be
+freely used and distributed without restriction.
diff --git a/bon_csv2html.cpp b/bon_csv2html.cpp
new file mode 100644
index 0000000..2607726
--- /dev/null
+++ b/bon_csv2html.cpp
@@ -0,0 +1,506 @@
+#include "bonnie.h"
+#include <cstdlib>
+#include <stdio.h>
+#include <vector>
+#include <string.h>
+#include <math.h>
+
+// Maximum number of items expected on a csv line
+#define MAX_ITEMS 48
+using namespace std;
+typedef vector<PCCHAR> STR_VEC;
+
+vector<STR_VEC> data;
+typedef PCCHAR * PPCCHAR;
+PPCCHAR * props;
+
+// Print the start of the HTML file
+// return the number of columns space in the middle
+int header();
+// Splits a line of text (CSV format) by commas and adds it to the list to
+// process later. Doesn't keep any pointers to the buf...
+void read_in(CPCCHAR buf);
+// print line in the specified line from columns start..end as a line of a
+// HTML table
+void print_a_line(int num, int start, int end);
+// print a single item of data
+void print_item(int num, int item, CPCCHAR extra = NULL);
+// Print the end of the HTML file
+void footer();
+// Calculate the colors for backgrounds
+void calc_vals();
+// Returns a string representation of a color that maps to the value. The
+// range of values is 0..range_col and val is the value. If reverse is set
+// then low values are green and high values are red.
+PCCHAR get_col(double range_col, double val, bool reverse, CPCCHAR extra);
+
+typedef enum { eNoCols, eSpeed, eCPU, eLatency } VALS_TYPE;
+const VALS_TYPE vals[MAX_ITEMS] =
+ { eNoCols,eNoCols,eNoCols,eNoCols,eNoCols,eNoCols,eNoCols,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,
+ eNoCols,eNoCols,eNoCols,eNoCols,eNoCols,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,eSpeed,eCPU,
+ eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency,eLatency };
+
+bool col_used[MAX_ITEMS];
+#define COL_NAME 2
+#define COL_CONCURRENCY 3
+#define COL_FILE_size 5
+#define COL_DATA_CHUNK_size 6
+#define COL_PUTC 7
+#define COL_NUM_FILES 19
+#define COL_MAX_size 20
+#define COL_MIN_size 21
+#define COL_NUM_DIRS 22
+#define COL_FILE_CHUNK_size 23
+#define COL_RAN_DEL_CPU 35
+#define COL_PUTC_LATENCY 36
+#define COL_SEEKS_LATENCY 41
+#define COL_SEQ_CREATE_LATENCY 42
+#define COL_RAN_DEL_LATENCY 47
+
+void usage()
+{
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int i;
+ for(i = 0; i < MAX_ITEMS; i++)
+ col_used[i] = false;
+
+ char buf[1024];
+
+ FILE *fp = NULL;
+ if(argc > 1)
+ {
+ fp = fopen(argv[1], "r");
+ if(!fp)
+ usage();
+ }
+ while(fgets(buf, sizeof(buf), fp ? fp : stdin))
+ {
+ buf[sizeof(buf) - 1] = '\0';
+ strtok(buf, "\r\n");
+ read_in(buf);
+ }
+
+ props = new PPCCHAR[data.size()];
+ for(i = 0; i < data.size(); i++)
+ {
+ props[i] = new PCCHAR[MAX_ITEMS];
+ props[i][0] = NULL;
+ props[i][1] = NULL;
+ props[i][COL_NAME] = "bgcolor=\"#FFFFFF\" class=\"rowheader\"><font size=+1";
+ int j;
+ for(j = COL_CONCURRENCY; j < MAX_ITEMS; j++)
+ {
+ if( (j >= COL_NUM_FILES && j <= COL_FILE_CHUNK_size) || j <= COL_DATA_CHUNK_size )
+ {
+ props[i][j] = "class=\"size\" bgcolor=\"#FFFFFF\"";
+ }
+ else
+ {
+ props[i][j] = NULL;
+ }
+ }
+ }
+ calc_vals();
+ int mid_width = header();
+ for(i = 0; i < data.size(); i++)
+ {
+// First print the average speed line
+ printf("<tr>");
+ print_item(i, COL_NAME, "rowspan=\"2\"");
+ if(col_used[COL_CONCURRENCY] == true)
+ print_item(i, COL_CONCURRENCY);
+ print_item(i, COL_FILE_size); // file_size
+ if(col_used[COL_DATA_CHUNK_size] == true)
+ print_item(i, COL_DATA_CHUNK_size);
+ print_a_line(i, COL_PUTC, COL_NUM_FILES);
+ if(col_used[COL_MAX_size])
+ print_item(i, COL_MAX_size);
+ if(col_used[COL_MIN_size])
+ print_item(i, COL_MIN_size);
+ if(col_used[COL_NUM_DIRS])
+ print_item(i, COL_NUM_DIRS);
+ if(col_used[COL_FILE_CHUNK_size])
+ print_item(i, COL_FILE_CHUNK_size);
+ print_a_line(i, COL_FILE_CHUNK_size + 1, COL_RAN_DEL_CPU);
+ printf("</tr>\n");
+// Now print the latency line
+ printf("<tr>");
+ int lat_width = 1;
+ if(col_used[COL_DATA_CHUNK_size] == true)
+ lat_width++;
+ if(col_used[COL_CONCURRENCY] == true)
+ lat_width++;
+ printf("<td class=\"size\" bgcolor=\"#FFFFFF\" colspan=\"%d\">Latency</td>"
+ , lat_width);
+ print_a_line(i, COL_PUTC_LATENCY, COL_SEEKS_LATENCY);
+ int bef_lat_width;
+ lat_width = 1;
+ if(mid_width > 1)
+ lat_width = 2;
+ bef_lat_width = mid_width - lat_width;
+ if(bef_lat_width)
+ printf("<td colspan=\"%d\"></td>", bef_lat_width);
+ printf("<td class=\"size\" bgcolor=\"#FFFFFF\" colspan=\"%d\">Latency</td>", lat_width);
+ print_a_line(i, COL_SEQ_CREATE_LATENCY, COL_RAN_DEL_LATENCY);
+ printf("</tr>\n");
+ }
+ footer();
+ return 0;
+}
+
+typedef struct { double val; int pos; int col_ind; } ITEM;
+typedef ITEM * PITEM;
+
+int compar(const void *a, const void *b)
+{
+ double a1 = PITEM(a)->val;
+ double b1 = PITEM(b)->val;
+ if(a1 < b1)
+ return -1;
+ if(a1 > b1)
+ return 1;
+ return 0;
+}
+
+void calc_vals()
+{
+ ITEM *arr = new ITEM[data.size()];
+ for(unsigned int column_ind = 0; column_ind < MAX_ITEMS; column_ind++)
+ {
+ switch(vals[column_ind])
+ {
+ case eNoCols:
+ {
+ for(unsigned int row_ind = 0; row_ind < data.size(); row_ind++)
+ {
+ if(column_ind == COL_CONCURRENCY)
+ {
+ if(data[row_ind][column_ind] && strcmp("1", data[row_ind][column_ind]))
+ col_used[column_ind] = true;
+ }
+ else
+ {
+ if(data[row_ind][column_ind] && strlen(data[row_ind][column_ind]))
+ col_used[column_ind] = true;
+ }
+ }
+ }
+ break;
+ case eCPU:
+ {
+ for(unsigned int row_ind = 0; row_ind < data.size(); row_ind++)
+ {
+ double work, cpu;
+ arr[row_ind].val = 0.0;
+ if(data[row_ind].size() > column_ind
+ && sscanf(data[row_ind][column_ind - 1], "%lf", &work) == 1
+ && sscanf(data[row_ind][column_ind], "%lf", &cpu) == 1)
+ {
+ arr[row_ind].val = cpu / work;
+ }
+ arr[row_ind].pos = row_ind;
+ }
+ qsort(arr, data.size(), sizeof(ITEM), compar);
+ int col_count = -1;
+ double min_col = -1.0, max_col = -1.0;
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ // if item is different from previous or if the first row
+ // (sort_ind == 0) then increment col count
+ if(sort_ind == 0 || arr[sort_ind].val != arr[sort_ind - 1].val)
+ {
+ if(arr[sort_ind].val != 0.0)
+ {
+ col_count++;
+ if(min_col == -1.0)
+ min_col = arr[sort_ind].val;
+ else
+ min_col = min(arr[sort_ind].val, min_col);
+ max_col = max(max_col, arr[sort_ind].val);
+ }
+ }
+ arr[sort_ind].col_ind = col_count;
+ }
+ // if more than 1 line has data then calculate colors
+ if(col_count > 0)
+ {
+ double divisor = max_col / min_col;
+ if(divisor < 2.0)
+ {
+ double mult = sqrt(2.0 / divisor);
+ max_col *= mult;
+ min_col /= mult;
+ }
+ double range_col = max_col - min_col;
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ if(arr[sort_ind].col_ind > -1)
+ {
+ props[arr[sort_ind].pos][column_ind]
+ = get_col(range_col, arr[sort_ind].val - min_col, true, "");
+ }
+ }
+ }
+ else
+ {
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ if(vals[column_ind] == eLatency)
+ {
+ props[sort_ind][column_ind] = "colspan=\"2\"";
+ }
+ }
+ }
+ }
+ break;
+ case eSpeed:
+ case eLatency:
+ {
+ for(unsigned int row_ind = 0; row_ind < data.size(); row_ind++)
+ {
+ arr[row_ind].val = 0.0;
+ if(data[row_ind].size() <= column_ind
+ || sscanf(data[row_ind][column_ind], "%lf", &arr[row_ind].val) == 0)
+ arr[row_ind].val = 0.0;
+ if(vals[column_ind] == eLatency && arr[row_ind].val != 0.0)
+ {
+ if(strstr(data[row_ind][column_ind], "ms"))
+ arr[row_ind].val *= 1000.0;
+ else if(!strstr(data[row_ind][column_ind], "us"))
+ arr[row_ind].val *= 1000000.0; // is !us && !ms then secs!
+ }
+ arr[row_ind].pos = row_ind;
+ }
+ qsort(arr, data.size(), sizeof(ITEM), compar);
+ int col_count = -1;
+ double min_col = -1.0, max_col = -1.0;
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ // if item is different from previous or if the first row
+ // (sort_ind == 0) then increment col count
+ if(sort_ind == 0 || arr[sort_ind].val != arr[sort_ind - 1].val)
+ {
+ if(arr[sort_ind].val != 0.0)
+ {
+ col_count++;
+ if(min_col == -1.0)
+ min_col = arr[sort_ind].val;
+ else
+ min_col = min(arr[sort_ind].val, min_col);
+ max_col = max(max_col, arr[sort_ind].val);
+ }
+ }
+ arr[sort_ind].col_ind = col_count;
+ }
+ // if more than 1 line has data then calculate colors
+ if(col_count > 0)
+ {
+ double divisor = max_col / min_col;
+ if(divisor < 2.0)
+ {
+ double mult = sqrt(2.0 / divisor);
+ max_col *= mult;
+ min_col /= mult;
+ }
+ double range_col = max_col - min_col;
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ if(arr[sort_ind].col_ind > -1)
+ {
+ bool reverse = false;
+ PCCHAR extra = "";
+ if(vals[column_ind] != eSpeed)
+ {
+ reverse = true;
+ extra = " colspan=\"2\"";
+ }
+ props[arr[sort_ind].pos][column_ind]
+ = get_col(range_col, arr[sort_ind].val - min_col, reverse, extra);
+ }
+ else if(vals[column_ind] != eSpeed)
+ {
+ props[arr[sort_ind].pos][column_ind] = "colspan=\"2\"";
+ }
+ }
+ }
+ else
+ {
+ for(unsigned int sort_ind = 0; sort_ind < data.size(); sort_ind++)
+ {
+ if(vals[column_ind] == eLatency)
+ {
+ props[sort_ind][column_ind] = "colspan=\"2\"";
+ }
+ }
+ }
+ }
+ break;
+ } // end switch
+ }
+}
+
+PCCHAR get_col(double range_col, double val, bool reverse, CPCCHAR extra)
+{
+ if(reverse)
+ val = range_col - val;
+ const int buf_len = 256;
+ PCHAR buf = new char[buf_len];
+ int green = int(255.0 * val / range_col);
+ green = min(green, 255);
+ int red = 255 - green;
+ _snprintf(buf
+#ifndef NO_SNPRINTF
+ , buf_len
+#endif
+ , "bgcolor=\"#%02X%02X00\"%s", red, green, extra);
+ buf[buf_len - 1] = '\0';
+ return buf;
+}
+
+void heading(const char * const head);
+
+int header()
+{
+ int vers_width = 2;
+ if(col_used[COL_DATA_CHUNK_size] == true)
+ vers_width++;
+ if(col_used[COL_CONCURRENCY] == true)
+ vers_width++;
+ int mid_width = 1;
+ if(col_used[COL_MAX_size])
+ mid_width++;
+ if(col_used[COL_MIN_size])
+ mid_width++;
+ if(col_used[COL_NUM_DIRS])
+ mid_width++;
+ if(col_used[COL_FILE_CHUNK_size])
+ mid_width++;
+ printf("<!DOCTYPE html PUBLIC \"-//W3C//Dtd XHTML 1.0 Strict//EN\" \"http://www.w3.org/tr/xhtml1/Dtd/xhtml1-strict.dtd\">"
+"<html xmlns=\"http://www.w3.org/1999/xhtml\">"
+"<head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\" /><title>Bonnie++ Benchmark results</title>"
+"<style type=\"text/css\">"
+"td.header {text-align: center; backgroundcolor: \"#CCFFFF\" }"
+"td.rowheader {text-align: center; backgroundcolor: \"#CCCFFF\" }"
+"td.size {text-align: center; backgroundcolor: \"#CCCFFF\" }"
+"td.ksec {text-align: center; fontstyle: italic }"
+"</style></head>"
+"<body>"
+"<table border=\"3\" cellpadding=\"2\" cellspacing=\"1\">"
+"<tr><td colspan=\"%d\" class=\"header\"><font size=+1><b>"
+"Version " BON_VERSION
+"</b></font></td>"
+"<td colspan=\"6\" class=\"header\"><font size=+2><b>Sequential Output</b></font></td>"
+"<td colspan=\"4\" class=\"header\"><font size=+2><b>Sequential Input</b></font></td>"
+"<td colspan=\"2\" rowspan=\"2\" class=\"header\"><font size=+2><b>Random<br>Seeks</b></font></td>"
+"<td colspan=\"%d\" class=\"header\"></td>"
+"<td colspan=\"6\" class=\"header\"><font size=+2><b>Sequential Create</b></font></td>"
+"<td colspan=\"6\" class=\"header\"><font size=+2><b>Random Create</b></font></td>"
+"</tr>\n"
+"<tr>", vers_width, mid_width);
+ if(col_used[COL_CONCURRENCY] == true)
+ printf("<td colspan=\"2\">Concurrency</td>");
+ else
+ printf("<td></td>");
+ printf("<td>Size</td>");
+ if(col_used[COL_DATA_CHUNK_size] == true)
+ printf("<td>Chunk Size</td>");
+ heading("Per Char"); heading("Block"); heading("Rewrite");
+ heading("Per Char"); heading("Block");
+ printf("<td>Num Files</td>");
+ if(col_used[COL_MAX_size])
+ printf("<td>Max Size</td>");
+ if(col_used[COL_MIN_size])
+ printf("<td>Min Size</td>");
+ if(col_used[COL_NUM_DIRS])
+ printf("<td>Num Dirs</td>");
+ if(col_used[COL_FILE_CHUNK_size])
+ printf("<td>Chunk Size</td>");
+ heading("Create"); heading("Read"); heading("Delete");
+ heading("Create"); heading("Read"); heading("Delete");
+ printf("</tr>");
+
+ printf("<tr><td colspan=\"%d\"></td>", vers_width);
+
+ int i;
+ CPCCHAR ksec_form = "<td class=\"ksec\"><font size=-2>%s/sec</font></td>"
+ "<td class=\"ksec\"><font size=-2>%% CPU</font></td>";
+ for(i = 0; i < 5; i++)
+ {
+ printf(ksec_form, "K");
+ }
+ printf(ksec_form, "");
+ printf("<td colspan=\"%d\"></td>", mid_width);
+ for(i = 0; i < 6; i++)
+ {
+ printf(ksec_form, "");
+ }
+ printf("</tr>\n");
+ return mid_width;
+}
+
+void heading(const char * const head)
+{
+ printf("<td colspan=\"2\">%s</td>", head);
+}
+
+void footer()
+{
+ printf("</table>\n</body></html>\n");
+}
+
+STR_VEC split(CPCCHAR delim, CPCCHAR buf)
+{
+ STR_VEC arr;
+ char *tmp = strdup(buf);
+ while(1)
+ {
+ arr.push_back(tmp);
+ tmp = strstr(tmp, delim);
+ if(!tmp)
+ break;
+ *tmp = '\0';
+ tmp += strlen(delim);
+ }
+ return arr;
+}
+
+void read_in(CPCCHAR buf)
+{
+ STR_VEC arr = split(",", buf);
+ if(strcmp(arr[0], CSV_VERSION) )
+ {
+ if(strncmp(arr[0], "format_version", 14))
+ fprintf(stderr, "Can't process: %s\n", buf);
+ free((void *)arr[0]);
+ return;
+ }
+ data.push_back(arr);
+}
+
+void print_item(int num, int item, CPCCHAR extra)
+{
+ PCCHAR line_data;
+ if(int(data[num].size()) > item)
+ line_data = data[num][item];
+ else
+ line_data = "";
+ printf("<td");
+ if(extra)
+ printf(" %s", extra);
+ if(props[num][item])
+ printf(" %s", props[num][item]);
+ printf(">%s</td>", line_data);
+}
+
+void print_a_line(int num, int start, int end)
+{
+ int i;
+ for(i = start; i <= end; i++)
+ {
+ print_item(num, i);
+ }
+}
diff --git a/bon_csv2txt b/bon_csv2txt
new file mode 100644
index 0000000..2a6944c
--- /dev/null
+++ b/bon_csv2txt
@@ -0,0 +1,145 @@
+#!/usr/bin/perl
+
+{
+ my $line;
+ $iocount = 0;
+ $filecount = 0;
+ while($line = <STDIN>)
+ {
+ while($line =~ /^name,/)
+ {
+ $line = <STDIN>;
+ }
+ process($line);
+ }
+ printIOData();
+
+ printFileData();
+}
+
+sub IOHeader
+{
+ my $version = '1.97';
+printf(STDOUT "Version %9s ------Sequential Output------ --Sequential Input- --Random-\n", $version);
+print " -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n";
+print "Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\n";
+}
+
+sub FileHeader
+{
+print " ------Sequential Create------ --------Random Create--------\n";
+print " -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n";
+print "files:max:min /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n";
+}
+
+sub process
+{
+ my($line) = @_;
+
+ chomp($line);
+ $line =~ s/\r//;
+ my @arr = split(',', $line);
+ my $csv_version = '1.97';
+ if($arr[0] ne $csv_version)
+ {
+ printf(STDERR "Can't process: %s\n", $line);
+ return;
+ }
+ my $name = $arr[2];
+
+# merge size and chunk-size
+ my $tmp = $arr[5];
+ if($arr[6] ne "")
+ {
+ $tmp .= ":" . $arr[6];
+ }
+ $arr[6] = $tmp;
+# $ioline contains the file-IO data, IE we strip off the data on file creation
+ my @ioline = @arr[6 .. 18];
+ if(join('', @ioline) ne "")
+ {
+ $io[$iocount] = $name . "," . join(',', @ioline);
+ if($#arr > 37)
+ {
+ $io[$iocount] .= "," . join(',', @arr[36..41]);
+ }
+ $iocount++;
+ }
+
+# merge num_files, max_size, min_size, num_dirs, and file_chunk_size
+ $tmp = $arr[19];
+ if($arr[20] ne "")
+ {
+ $tmp .= ":" . $arr[20];
+ }
+ if($arr[21] ne "")
+ {
+ $tmp .= ":" . $arr[21];
+ }
+ if($arr[22] ne "")
+ {
+ $tmp .= "/" . $arr[22];
+ }
+ if($arr[23] ne "")
+ {
+ $tmp .= ":" . $arr[23];
+ }
+ $arr[23] = $tmp;
+# $fileline contains the file creation data - everything but $ioline
+ my @fileline = @arr[23 .. 35];
+ if(join('', @fileline) ne "")
+ {
+ $file[$filecount] = $name . "," . join(',', @fileline);
+ if($#arr > 41)
+ {
+ $file[$filecount] .= "," . join(',', @arr[42..$#arr]);
+ }
+ $filecount++;
+ }
+}
+
+sub printFileData
+{
+ if($filecount < 1){ return; }
+ FileHeader();
+ for($i = 0; $i < $filecount; $i++)
+ {
+ @arr = split(/,/,$file[$i]);
+ my $size = $arr[1];
+ $size =~ s/:0:0:/\//;
+ my $desc = $arr[0] . " ";
+ $desc = substr($desc, 0, 18 - length($size) );
+ $desc .= " " . $size;
+ printf(STDOUT "%19s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s\n"
+ , $desc, $arr[2], $arr[3], $arr[4], $arr[5], $arr[6], $arr[7]
+ , $arr[8], $arr[9], $arr[10], $arr[11], $arr[12], $arr[13]);
+ if($#arr > 13 && join("", @arr[14..$#arr]) ne "")
+ {
+ printf(STDOUT "Latency %9s %9s %9s %9s %9s %9s\n"
+ , $arr[14], $arr[15], $arr[16], $arr[17], $arr[18], $arr[19]);
+ }
+ }
+}
+
+sub printIOData
+{
+ if($iocount < 1){ return; }
+ IOHeader();
+ for($i = 0; $i < $iocount; $i++)
+ {
+ @arr = split(/,/,$io[$i]);
+ my $size = $arr[1];
+ my $desc = $arr[0] . " ";
+ $desc = substr($desc, 0, 18 - length($size) );
+ $desc .= " " . $size;
+ printf(STDOUT "%19s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s\n"
+ , $desc, $arr[2], $arr[3], $arr[4], $arr[5], $arr[6], $arr[7]
+ , $arr[8], $arr[9], $arr[10], $arr[11], $arr[12], $arr[13]);
+ if($#arr > 13 && join("", @arr[14..$#arr]) ne "")
+ {
+ printf(STDOUT "Latency %9s %9s %9s %9s %9s %9s\n"
+ , $arr[14], $arr[15], $arr[16], $arr[17], $arr[18], $arr[19]);
+ }
+ }
+}
+
diff --git a/bon_csv2txt.1 b/bon_csv2txt.1
new file mode 100644
index 0000000..94d640f
--- /dev/null
+++ b/bon_csv2txt.1
@@ -0,0 +1 @@
+.so man1/bon_csv2html.1
diff --git a/bon_csv2txt.in b/bon_csv2txt.in
new file mode 100755
index 0000000..1ff988a
--- /dev/null
+++ b/bon_csv2txt.in
@@ -0,0 +1,145 @@
+#!/usr/bin/perl
+
+{
+ my $line;
+ $iocount = 0;
+ $filecount = 0;
+ while($line = <STDIN>)
+ {
+ while($line =~ /^name,/)
+ {
+ $line = <STDIN>;
+ }
+ process($line);
+ }
+ printIOData();
+
+ printFileData();
+}
+
+sub IOHeader
+{
+ my $version = '@version@';
+printf(STDOUT "Version %9s ------Sequential Output------ --Sequential Input- --Random-\n", $version);
+print " -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n";
+print "Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\n";
+}
+
+sub FileHeader
+{
+print " ------Sequential Create------ --------Random Create--------\n";
+print " -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n";
+print "files:max:min /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n";
+}
+
+sub process
+{
+ my($line) = @_;
+
+ chomp($line);
+ $line =~ s/\r//;
+ my @arr = split(',', $line);
+ my $csv_version = '@csv_version@';
+ if($arr[0] ne $csv_version)
+ {
+ printf(STDERR "Can't process: %s\n", $line);
+ return;
+ }
+ my $name = $arr[2];
+
+# merge size and chunk-size
+ my $tmp = $arr[5];
+ if($arr[6] ne "")
+ {
+ $tmp .= ":" . $arr[6];
+ }
+ $arr[6] = $tmp;
+# $ioline contains the file-IO data, IE we strip off the data on file creation
+ my @ioline = @arr[6 .. 18];
+ if(join('', @ioline) ne "")
+ {
+ $io[$iocount] = $name . "," . join(',', @ioline);
+ if($#arr > 37)
+ {
+ $io[$iocount] .= "," . join(',', @arr[36..41]);
+ }
+ $iocount++;
+ }
+
+# merge num_files, max_size, min_size, num_dirs, and file_chunk_size
+ $tmp = $arr[19];
+ if($arr[20] ne "")
+ {
+ $tmp .= ":" . $arr[20];
+ }
+ if($arr[21] ne "")
+ {
+ $tmp .= ":" . $arr[21];
+ }
+ if($arr[22] ne "")
+ {
+ $tmp .= "/" . $arr[22];
+ }
+ if($arr[23] ne "")
+ {
+ $tmp .= ":" . $arr[23];
+ }
+ $arr[23] = $tmp;
+# $fileline contains the file creation data - everything but $ioline
+ my @fileline = @arr[23 .. 35];
+ if(join('', @fileline) ne "")
+ {
+ $file[$filecount] = $name . "," . join(',', @fileline);
+ if($#arr > 41)
+ {
+ $file[$filecount] .= "," . join(',', @arr[42..$#arr]);
+ }
+ $filecount++;
+ }
+}
+
+sub printFileData
+{
+ if($filecount < 1){ return; }
+ FileHeader();
+ for($i = 0; $i < $filecount; $i++)
+ {
+ @arr = split(/,/,$file[$i]);
+ my $size = $arr[1];
+ $size =~ s/:0:0:/\//;
+ my $desc = $arr[0] . " ";
+ $desc = substr($desc, 0, 18 - length($size) );
+ $desc .= " " . $size;
+ printf(STDOUT "%19s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s\n"
+ , $desc, $arr[2], $arr[3], $arr[4], $arr[5], $arr[6], $arr[7]
+ , $arr[8], $arr[9], $arr[10], $arr[11], $arr[12], $arr[13]);
+ if($#arr > 13 && join("", @arr[14..$#arr]) ne "")
+ {
+ printf(STDOUT "Latency %9s %9s %9s %9s %9s %9s\n"
+ , $arr[14], $arr[15], $arr[16], $arr[17], $arr[18], $arr[19]);
+ }
+ }
+}
+
+sub printIOData
+{
+ if($iocount < 1){ return; }
+ IOHeader();
+ for($i = 0; $i < $iocount; $i++)
+ {
+ @arr = split(/,/,$io[$i]);
+ my $size = $arr[1];
+ my $desc = $arr[0] . " ";
+ $desc = substr($desc, 0, 18 - length($size) );
+ $desc .= " " . $size;
+ printf(STDOUT "%19s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s %5s %3s\n"
+ , $desc, $arr[2], $arr[3], $arr[4], $arr[5], $arr[6], $arr[7]
+ , $arr[8], $arr[9], $arr[10], $arr[11], $arr[12], $arr[13]);
+ if($#arr > 13 && join("", @arr[14..$#arr]) ne "")
+ {
+ printf(STDOUT "Latency %9s %9s %9s %9s %9s %9s\n"
+ , $arr[14], $arr[15], $arr[16], $arr[17], $arr[18], $arr[19]);
+ }
+ }
+}
+
diff --git a/bon_file.cpp b/bon_file.cpp
new file mode 100644
index 0000000..07aa223
--- /dev/null
+++ b/bon_file.cpp
@@ -0,0 +1,628 @@
+#include "bonnie.h"
+#include <fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "bon_file.h"
+#include "bon_time.h"
+#include "duration.h"
+
+CPCCHAR rand_chars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+COpenTest::COpenTest(int chunk_size, bool use_sync, bool *doExit)
+ : m_chunk_size(chunk_size)
+ , m_number(0)
+ , m_number_directories(1)
+ , m_max(0)
+ , m_min(0)
+ , m_size_range(0)
+ , m_dirname(NULL)
+ , m_file_name_buf(NULL)
+ , m_file_names(NULL)
+ , m_sync(use_sync)
+ , m_directoryHandles(NULL)
+ , m_dirIndex(NULL)
+ , m_buf(new char[m_chunk_size])
+ , m_exit(doExit)
+ , m_sync_dir(true)
+{
+}
+
+void COpenTest::random_sort(Rand &r)
+{
+ for(int i = 0; i < m_number; i++)
+ {
+ char *tmp = m_file_names[i];
+ int newind = r.getNum() % m_number;
+ m_file_names[i] = m_file_names[newind];
+ m_file_names[newind] = tmp;
+ if(m_dirIndex)
+ {
+ int tmpInd = m_dirIndex[i];
+ m_dirIndex[i] = m_dirIndex[newind];
+ m_dirIndex[newind] = tmpInd;
+ }
+ if(*m_exit) return;
+ }
+}
+
+COpenTest::~COpenTest()
+{
+ int i;
+ if(m_dirname)
+ {
+ fprintf(stderr, "Cleaning up test directory after error.\n");
+ if(m_file_names)
+ {
+ for(i = 0; i < m_number; i++)
+ unlink(m_file_names[i]);
+ }
+ if(m_number_directories > 1)
+ {
+ char buf[6];
+ for(i = 0; i < m_number_directories; i++)
+ {
+ sprintf(buf, "%05d", i);
+ if(rmdir(buf))
+ io_error("rmdir");
+ }
+ }
+ if(chdir("..") || rmdir(m_dirname))
+ io_error("rmdir");
+ delete m_dirname;
+ }
+ if(m_directoryHandles)
+ {
+ for(i = 0; i < m_number_directories; i++)
+ close(m_directoryHandles[i]);
+ delete m_directoryHandles;
+ }
+ delete m_file_name_buf;
+ delete m_file_names;
+ delete m_dirIndex;
+ delete m_buf;
+}
+
+void COpenTest::make_names(Rand &r, bool do_random)
+{
+ delete m_file_name_buf;
+ delete m_file_names;
+ int names_per_directory = m_number / m_number_directories;
+ int names_in_dir = 0;
+ int directory_num = 0;
+ if(!m_dirIndex && m_sync)
+ m_dirIndex = new int[m_number];
+ if(m_number_directories == 1)
+ {
+ m_file_name_buf = new char[(MaxNameLen + 1) * m_number];
+ }
+ else
+ {
+ m_file_name_buf = new char[(MaxNameLen + 1 + 6) * m_number];
+ }
+ m_file_names = new PCHAR[m_number];
+ PCHAR buf = m_file_name_buf;
+ int num_rand_chars = strlen(rand_chars);
+ for(int i = 0; i < m_number; i++)
+ {
+ if(*m_exit)
+ {
+ delete m_file_names;
+ m_file_names = NULL;
+ return;
+ }
+ char rand_buf[RandExtraLen + 1];
+ int len = r.getNum() % (RandExtraLen + 1);
+ int j;
+ for(j = 0; j < len; j++)
+ {
+ rand_buf[j] = rand_chars[r.getNum() % num_rand_chars];
+ }
+ rand_buf[j] = '\0';
+ m_file_names[i] = buf;
+ if(m_number_directories != 1)
+ {
+ sprintf(buf, "%05d/", directory_num);
+ buf += strlen(buf);
+ }
+ if(m_sync)
+ m_dirIndex[i] = directory_num;
+ names_in_dir++;
+ if(names_in_dir > names_per_directory)
+ {
+ names_in_dir = 0;
+ directory_num++;
+ }
+ if(do_random)
+ {
+ sprintf(buf, "%s%010x", rand_buf, i);
+ }
+ else
+ {
+ sprintf(buf, "%010x%s", i, rand_buf);
+ }
+ buf += strlen(buf) + 1;
+ }
+}
+
+int COpenTest::create_a_file(const char *filename, char *buf, int size, int dir)
+{
+ FILE_TYPE fd = 0;
+ int flags = S_IRUSR | S_IWUSR;
+ fd = file_open(filename, O_CREAT|O_EXCL|O_WRONLY, flags);
+
+ if(fd == -1)
+ {
+ fprintf(stderr, "Can't create file %s\n", filename);
+ return -1;
+ }
+ if(m_max)
+ {
+ for(int i = 0; i < size; i += m_chunk_size)
+ {
+ int to_write = size - i;
+ if(to_write > m_chunk_size) to_write = m_chunk_size;
+ if(to_write != write(fd, static_cast<void *>(buf), to_write))
+ {
+ fprintf(stderr, "Can't write data.\n");
+ return -1;
+ }
+ }
+ }
+ if(m_sync)
+ {
+ if(fsync(fd))
+ {
+ fprintf(stderr, "Can't sync file.\n");
+ return -1;
+ }
+ if(m_sync_dir && fsync(m_directoryHandles[dir]))
+ {
+ fprintf(stderr, "Can't sync directory, turning off dir-sync.\n");
+ m_sync_dir = false;
+ }
+ }
+ close(fd);
+ return 0;
+}
+
+int COpenTest::create_a_link(const char *original, const char *filename, int dir)
+{
+ if(m_max == -1)
+ {
+ if(link(original, filename))
+ {
+ fprintf(stderr, "Can't create link %s\n", filename);
+ return -1;
+ }
+ if(m_sync)
+ {
+ if(fsync(m_directoryHandles[dir]))
+ {
+ fprintf(stderr, "Can't sync file.\n");
+ return -1;
+ }
+ }
+ }
+ else
+ {
+ if(symlink(original, filename))
+ {
+ fprintf(stderr, "Can't create symlink %s\n", filename);
+ return -1;
+ }
+ if(m_sync)
+ {
+ if(fsync(m_directoryHandles[dir]))
+ {
+ fprintf(stderr, "Can't sync file.\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+int COpenTest::create(CPCCHAR dirname, BonTimer &timer, int num, int max_size
+ , int min_size, int num_directories, bool do_random)
+{
+ if(num_directories >= 100000)
+ {
+ fprintf(stderr, "Can't have more than 99,999 directories.\n");
+ return -1;
+ }
+
+ m_number = num * DirectoryUnit;
+ m_number_directories = num_directories;
+ make_names(timer.random_source, do_random);
+ m_max = max_size;
+ m_min = min_size;
+ m_size_range = m_max - m_min;
+ m_dirname = new char[strlen(dirname) + 1];
+ strcpy(m_dirname, dirname);
+
+ if(num_directories >= 100000)
+ {
+ fprintf(stderr, "Can't have more than 99,999 directories.\n");
+ return -1;
+ }
+ if(mkdir(dirname, S_IRWXU))
+ {
+ fprintf(stderr, "Can't make directory %s\n", dirname);
+ return -1;
+ }
+ if(chdir(dirname))
+ {
+ fprintf(stderr, "Can't change to directory %s\n", dirname);
+ return -1;
+ }
+ int i;
+ if(m_sync)
+ m_directoryHandles = new FILE_TYPE[num_directories];
+ if(num_directories > 1)
+ {
+ for(i = 0; i < num_directories; i++)
+ {
+ sprintf(m_buf, "%05d", i);
+ if(mkdir(m_buf, S_IRWXU))
+ {
+ fprintf(stderr, "Can't make directory %s\n", m_buf);
+ return -1;
+ }
+ if(m_sync)
+ {
+ m_directoryHandles[i] = open(m_buf, O_RDONLY);
+ if(m_directoryHandles[i] == -1)
+ {
+ fprintf(stderr, "Can't get directory handle.\n");
+ return -1;
+ }
+ }
+ }
+ }
+ else if(m_sync)
+ {
+ m_directoryHandles[0] = open(".", O_RDONLY);
+ if(m_directoryHandles[0] == -1)
+ {
+ fprintf(stderr, "Can't get directory handle.\n");
+ return -1;
+ }
+ }
+
+ Duration dur;
+ timer.start();
+ for(i = 0; i < m_number; i++)
+ {
+ if(*m_exit)
+ {
+ if(m_number_directories != 1 && chdir(".."))
+ {
+ fprintf(stderr, "Can't change to directory ..\n");
+ return -1;
+ }
+ return eCtrl_C;
+ }
+ dur.start();
+ // m_max < 0 means link or sym-link
+ if(m_max < 0)
+ {
+ if(i == 0)
+ {
+ if(create_a_file(m_file_names[0], m_buf, 0, m_dirIndex ? m_dirIndex[0] : 0))
+ return -1;
+ }
+ else
+ {
+ // create_a_link() looks at m_max to see what to do
+ if(create_a_link(m_file_names[0], m_file_names[i], m_dirIndex ? m_dirIndex[i] : 0))
+ return -1;
+ }
+ }
+ else
+ {
+ int size;
+ if(m_size_range)
+ size = m_min + (timer.random_source.getNum() % (m_size_range + 1));
+ else
+ size = m_max;
+ if(create_a_file(m_file_names[i], m_buf, size, m_dirIndex ? m_dirIndex[i] : 0))
+ return -1;
+ }
+ dur.stop();
+ }
+ sync();
+ timer.stop_and_record(do_random ? CreateRand : CreateSeq);
+ timer.add_latency(do_random ? CreateRand : CreateSeq, dur.getMax());
+ return 0;
+}
+
+int COpenTest::delete_random(BonTimer &timer)
+{
+ random_sort(timer.random_source);
+ timer.start();
+ int i;
+ Duration dur;
+ for(i = 0; i < m_number; i++)
+ {
+ dur.start();
+ if(unlink(m_file_names[i]))
+ {
+ fprintf(stderr, "Can't delete file %s\n", m_file_names[i]);
+ return -1;
+ }
+ if(m_sync && m_sync_dir)
+ {
+ if(fsync(m_directoryHandles[m_dirIndex[i]]))
+ {
+ fprintf(stderr, "Can't sync directory, turning off dir-sync.\n");
+ m_sync_dir = false;
+ }
+ }
+ dur.stop();
+ }
+ if(m_number_directories > 1)
+ {
+ char buf[6];
+ for(i = 0; i < m_number_directories; i++)
+ {
+ sprintf(buf, "%05d", i);
+ if(m_sync)
+ {
+ close(m_directoryHandles[i]);
+ }
+ if(rmdir(buf))
+ {
+ io_error("rmdir");
+ return -1;
+ }
+ }
+ }
+ else
+ {
+ if(m_sync)
+ {
+ close(m_directoryHandles[0]);
+ }
+ }
+ if(chdir("..") || rmdir(m_dirname))
+ {
+ io_error("rmdir");
+ return -1;
+ }
+ delete m_dirname;
+ m_dirname = NULL;
+ sync();
+ timer.stop_and_record(DelRand);
+ timer.add_latency(DelRand, dur.getMax());
+ return 0;
+}
+
+int COpenTest::delete_sequential(BonTimer &timer)
+{
+ timer.start();
+ int count = 0;
+ Duration dur;
+ for(int i = 0; i < m_number_directories; i++)
+ {
+ char buf[6];
+ if(m_number_directories != 1)
+ {
+ sprintf(buf, "%05d", i);
+ if(chdir(buf))
+ {
+ fprintf(stderr, "Can't change to directory %s\n", buf);
+ return -1;
+ }
+ }
+ DIR *d = opendir(".");
+ if(!d)
+ {
+ fprintf(stderr, "Can't open directory.\n");
+ if(m_number_directories != 1)
+ {
+ if(chdir(".."))
+ fprintf(stderr, "Can't chdir().\n");
+ }
+ return -1;
+ }
+ dirent *file_ent;
+
+ while(1)
+ {
+ dur.start();
+ file_ent = readdir(d);
+ if(file_ent == NULL)
+ break;
+ if(file_ent->d_name[0] != '.')
+ {
+ if(unlink(file_ent->d_name))
+ {
+ fprintf(stderr, "Can't delete file %s\n", file_ent->d_name);
+ return -1;
+ }
+
+
+ if(m_sync && m_sync_dir)
+ {
+ if(fsync(m_directoryHandles[i]))
+ {
+ fprintf(stderr, "Can't sync directory, turning off dir-sync.\n");
+ m_sync_dir = false;
+ }
+ }
+ count++;
+ }
+ dur.stop();
+ }
+ closedir(d);
+ if(m_sync)
+ {
+ close(m_directoryHandles[i]);
+ }
+ if(m_number_directories != 1)
+ {
+ if(chdir("..") || rmdir(buf))
+ {
+ io_error("rmdir");
+ return -1;
+ }
+ }
+ }
+ if(chdir("..") || rmdir(m_dirname))
+ {
+ io_error("rmdir");
+ return -1;
+ }
+ delete m_dirname;
+ m_dirname = NULL;
+ if(count != m_number)
+ {
+ fprintf(stderr, "Expected %d files but only got %d\n", m_number, count);
+ return -1;
+ }
+ sync();
+ timer.stop_and_record(DelSeq);
+ timer.add_latency(DelSeq, dur.getMax());
+ return 0;
+}
+
+int COpenTest::stat_file(CPCCHAR file)
+{
+ struct stat st;
+ if(stat(file, &st))
+ {
+ fprintf(stderr, "Can't stat file %s\n", file);
+ return -1;
+ }
+ if(st.st_size)
+ {
+ FILE_TYPE fd = 0;
+ int flags = O_RDONLY;
+ fd = open(file, flags);
+ if(fd == -1)
+ {
+ fprintf(stderr, "Can't open file %s\n", file);
+ return -1;
+ }
+ for(int i = 0; i < st.st_size; i += m_chunk_size)
+ {
+ int to_read = st.st_size - i;
+ if(to_read > m_chunk_size)
+ to_read = m_chunk_size;
+
+ if(to_read != read(fd, static_cast<void *>(m_buf), to_read))
+ {
+ fprintf(stderr, "Can't read data.\n");
+ return -1;
+ }
+ }
+ close(fd);
+ }
+ return 0;
+}
+
+int COpenTest::stat_random(BonTimer &timer)
+{
+ random_sort(timer.random_source);
+ timer.start();
+
+ int i;
+ Duration dur;
+ for(i = 0; i < m_number; i++)
+ {
+ dur.start();
+ if(-1 == stat_file(m_file_names[i]))
+ return -1;
+ dur.stop();
+ }
+ timer.stop_and_record(StatRand);
+ timer.add_latency(StatRand, dur.getMax());
+ return 0;
+}
+
+int COpenTest::stat_sequential(BonTimer &timer)
+{
+ timer.start();
+ int count = 0;
+ Duration dur;
+ for(int i = 0; i < m_number_directories; i++)
+ {
+ char buf[6];
+ if(m_number_directories != 1)
+ {
+ sprintf(buf, "%05d", i);
+ if(chdir(buf))
+ {
+ fprintf(stderr, "Can't change to directory %s\n", buf);
+ return -1;
+ }
+ }
+ DIR *d = opendir(".");
+ if(!d)
+ {
+ fprintf(stderr, "Can't open directory.\n");
+ if(m_number_directories != 1)
+ {
+ if(chdir(".."))
+ fprintf(stderr, "Can't chdir().\n");
+ }
+ return -1;
+ }
+ dirent *file_ent;
+ while(1)
+ {
+ dur.start();
+ file_ent = readdir(d);
+ if(file_ent == NULL)
+ break;
+ if(*m_exit)
+ {
+ if(m_number_directories != 1 && chdir(".."))
+ {
+ fprintf(stderr, "Can't change to directory ..\n");
+ return -1;
+ }
+ return eCtrl_C;
+ }
+ if(file_ent->d_name[0] != '.') // our files do not start with a dot
+ {
+ if(-1 == stat_file(file_ent->d_name))
+ {
+ if(m_number_directories != 1)
+ {
+ if(chdir(".."))
+ {
+ fprintf(stderr, "Can't chdir().\n");
+ return -1;
+ }
+ }
+ dur.stop();
+ return -1;
+ }
+ count++;
+ dur.stop();
+ }
+ }
+ closedir(d);
+ if(m_number_directories != 1)
+ {
+ if(chdir(".."))
+ {
+ fprintf(stderr, "Can't change to directory ..\n");
+ return -1;
+ }
+ }
+ }
+ if(count != m_number)
+ {
+ fprintf(stderr, "Expected %d files but only got %d\n", m_number, count);
+ return -1;
+ }
+ timer.stop_and_record(StatSeq);
+ timer.add_latency(StatSeq, dur.getMax());
+ return 0;
+}
+
diff --git a/bon_file.h b/bon_file.h
new file mode 100644
index 0000000..45712f4
--- /dev/null
+++ b/bon_file.h
@@ -0,0 +1,51 @@
+#ifndef BON_IO
+#define BON_IO
+
+#include "bonnie.h"
+class BonTimer;
+class Rand;
+
+typedef unsigned long MASK_TYPE;
+
+class COpenTest
+{
+public:
+ COpenTest(int chunk_size, bool use_sync, bool *doExit);
+ ~COpenTest();
+
+ int create(CPCCHAR dirname, BonTimer &timer, int num, int max_size
+ , int min_size, int num_directories, bool do_random);
+ int delete_random(BonTimer &timer);
+ int delete_sequential(BonTimer &timer);
+ int stat_random(BonTimer &timer);
+ int stat_sequential(BonTimer &timer);
+
+private:
+ void make_names(Rand &r, bool do_random);
+ int stat_file(CPCCHAR file);
+ int create_a_file(const char *filename, char *buf, int size, int dir);
+ int create_a_link(const char *original, const char *filename, int dir);
+
+ const int m_chunk_size;
+ int m_number; // the total number of files to create
+ int m_number_directories; // the number of directories to store files in
+ int m_max; // maximum file size (negative for links)
+ int m_min; // minimum file size
+ int m_size_range; // m_max - m_min
+ char *m_dirname; // name of the master directory
+ char *m_file_name_buf; // buffer to store all file names
+ char **m_file_names; // pointer to entries in m_file_name_buf
+ bool m_sync; // do we sync after every significant operation?
+ FILE_TYPE *m_directoryHandles; // handles to the directories for m_sync
+ int *m_dirIndex; // which directory we are in
+ char *m_buf;
+ bool *m_exit;
+ bool m_sync_dir;
+
+ void random_sort(Rand &r);
+
+ COpenTest(const COpenTest &t);
+ COpenTest & operator =(const COpenTest &t);
+};
+
+#endif
diff --git a/bon_io.cpp b/bon_io.cpp
new file mode 100644
index 0000000..4111481
--- /dev/null
+++ b/bon_io.cpp
@@ -0,0 +1,367 @@
+#include "bonnie.h"
+#include <stdlib.h>
+#include <fcntl.h>
+
+#include <dirent.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include "sync.h"
+
+#include <string.h>
+#include <limits.h>
+
+#include "bon_io.h"
+#include "bon_time.h"
+
+
+#define END_SEEK_PROCESS INT_MIN
+
+CFileOp::~CFileOp()
+{
+ Close();
+ if(m_name)
+ {
+ unlink(m_name);
+ free(m_name);
+ }
+ delete m_buf;
+}
+
+Thread *CFileOp::newThread(int threadNum)
+{
+ return new CFileOp(threadNum, this);
+}
+
+CFileOp::CFileOp(int threadNum, CFileOp *parent)
+ : Thread(threadNum, parent)
+ , m_timer(parent->m_timer)
+ , m_file_size(parent->m_file_size)
+ , m_fd(-1)
+ , m_isopen(false)
+ , m_name(PCHAR(malloc(strlen(parent->m_name) + 5)))
+ , m_sync(parent->m_sync)
+#ifdef O_DIRECT
+ , m_use_direct_io(parent->m_use_direct_io)
+#endif
+ , m_chunk_bits(parent->m_chunk_bits)
+ , m_chunk_size(parent->m_chunk_size)
+ , m_total_chunks(parent->m_total_chunks)
+ , m_buf(new char[m_chunk_size])
+{
+ strcpy(m_name, parent->m_name);
+}
+
+int CFileOp::action(PVOID)
+{
+ struct report_s seeker_report;
+ if(reopen(false))
+ return 1;
+ int ticket;
+ int rc;
+ Duration dur, test_time;
+ rc = Read(&ticket, sizeof(ticket), 0);
+ CPU_Duration test_cpu;
+ test_time.getTime(&seeker_report.StartTime);
+ test_cpu.start();
+ if(rc == sizeof(ticket) && ticket != END_SEEK_PROCESS) do
+ {
+ bool update = false;
+ if(ticket < 0)
+ {
+ ticket = abs(ticket);
+ update = true;
+ }
+ dur.start();
+ if(doseek(ticket % m_total_chunks, update) )
+ return 1;
+ dur.stop();
+ } while((rc = Read(&ticket, sizeof(ticket), 0)) == sizeof(ticket)
+ && ticket != END_SEEK_PROCESS);
+
+ if(rc != sizeof(ticket))
+ {
+ fprintf(stderr, "Can't read ticket.\n");
+ return 1;
+ }
+ Close();
+ // seeker report is start and end times, CPU used, and latency
+ test_time.getTime(&seeker_report.EndTime);
+ seeker_report.CPU = test_cpu.stop();
+ seeker_report.Latency = dur.getMax();
+ if(Write(&seeker_report, sizeof(seeker_report), 0) != sizeof(seeker_report))
+ {
+ fprintf(stderr, "Can't write report.\n");
+ return 1;
+ }
+ return 0;
+}
+
+int CFileOp::seek_test(Rand &r, bool quiet, Sync &s)
+{
+ int seek_tickets[SeekProcCount + Seeks];
+ int next;
+ for(next = 0; next < Seeks; next++)
+ {
+ seek_tickets[next] = r.getNum();
+ if(seek_tickets[next] < 0)
+ seek_tickets[next] = abs(seek_tickets[next]);
+ if(seek_tickets[next] % UpdateSeek == 0)
+ seek_tickets[next] = -seek_tickets[next];
+ }
+ for( ; next < (Seeks + SeekProcCount); next++)
+ seek_tickets[next] = END_SEEK_PROCESS;
+ if(reopen(false))
+ return 1;
+ go(NULL, SeekProcCount);
+
+ sleep(3);
+ if(s.decrement_and_wait(Lseek))
+ return 1;
+ if(!quiet) fprintf(stderr, "start 'em...");
+ if(Write(seek_tickets, sizeof(seek_tickets), 0) != int(sizeof(seek_tickets)) )
+ {
+ fprintf(stderr, "Can't write tickets.\n");
+ return 1;
+ }
+ Close();
+ for (next = 0; next < SeekProcCount; next++)
+ { /* for each child */
+ struct report_s seeker_report;
+
+ int rc;
+ if((rc = Read(&seeker_report, sizeof(seeker_report), 0))
+ != sizeof(seeker_report))
+ {
+ fprintf(stderr, "Can't read from pipe, expected %d, got %d.\n"
+ , int(sizeof(seeker_report)), rc);
+ return 1;
+ }
+
+ /*
+ * each child writes back its CPU, start & end times. The elapsed time
+ * to do all the seeks is the time the first child started until the
+ * time the last child stopped
+ */
+ m_timer.add_delta_report(seeker_report, Lseek);
+ if(!quiet) fprintf(stderr, "done...");
+ } /* for each child */
+ if(!quiet) fprintf(stderr, "\n");
+ return 0;
+}
+
+int CFileOp::seek(int offset, int whence)
+{
+ OFF_TYPE rc;
+ OFF_TYPE real_offset = offset;
+ real_offset *= m_chunk_size;
+ rc = file_lseek(m_fd, real_offset, whence);
+
+ if(rc == OFF_TYPE(-1))
+ {
+ sprintf(m_buf, "Error in lseek to chunk %d(" OFF_T_PRINTF ")", offset, real_offset);
+ perror(m_buf);
+ return rc;
+ }
+ return 0;
+}
+
+int CFileOp::read_block(PVOID buf)
+{
+ int total = 0;
+ bool printed_error = false;
+ while(total != m_chunk_size)
+ {
+ int rc = read(m_fd, buf, m_chunk_size - total);
+ if(rc == -1)
+ {
+ io_error("re-write read"); // exits program
+ }
+ else if(rc != m_chunk_size)
+ {
+ if(!printed_error)
+ {
+ fprintf(stderr, "Can't read a full block, only got %d bytes.\n", rc);
+ printed_error = true;
+ if(rc == 0)
+ return -1;
+ }
+ }
+ total += rc;
+ }
+ return total;
+}
+
+int CFileOp::read_block_byte(char *buf)
+{
+ char next;
+ for(int i = 0; i < m_chunk_size; i++)
+ {
+ if(read(m_fd, &next, 1) != 1)
+ {
+ fprintf(stderr, "Can't read a byte\n");
+ return -1;
+ }
+ /* just to fool optimizers */
+ buf[int(next)]++;
+ }
+
+ return 0;
+}
+
+int CFileOp::write_block(PVOID buf)
+{
+ int rc = ::write(m_fd, buf, m_chunk_size);
+ if(rc != m_chunk_size)
+ {
+ perror("Can't write block.");
+ return -1;
+ }
+ return rc;
+}
+
+int CFileOp::write_block_byte()
+{
+ for(int i = 0; i < m_chunk_size; i++)
+ {
+ char c = i & 0x7f;
+ if(write(m_fd, &c, 1) != 1)
+ {
+ fprintf(stderr, "Can't write() - disk full?\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int CFileOp::Open(CPCCHAR base_name, bool create)
+{
+ m_name = PCHAR(malloc(strlen(base_name) + 5));
+ strcpy(m_name, base_name);
+ return reopen(create);
+}
+
+CFileOp::CFileOp(BonTimer &timer, int file_size, int chunk_bits, bool use_sync
+#ifdef O_DIRECT
+ , bool use_direct_io
+#endif
+ )
+ : m_timer(timer)
+ , m_file_size(file_size)
+ , m_fd(-1)
+ , m_isopen(false)
+ , m_name(NULL)
+ , m_sync(use_sync)
+#ifdef O_DIRECT
+ , m_use_direct_io(use_direct_io)
+#endif
+ , m_chunk_bits(chunk_bits)
+ , m_chunk_size(1 << m_chunk_bits)
+ , m_total_chunks(Unit / m_chunk_size * file_size)
+ , m_buf(new char[m_chunk_size])
+{
+ if(m_total_chunks / file_size * m_chunk_size != Unit)
+ {
+ fprintf(stderr, "File size %d too big for chunk size %d\n", file_size, m_chunk_size);
+ exit(1);
+ }
+}
+
+int CFileOp::reopen(bool create)
+{
+ if(m_isopen) Close();
+
+ m_isopen = true;
+ if(m_open(m_name, create))
+ return 1;
+ return 0;
+}
+
+int CFileOp::m_open(CPCCHAR base_name, bool create)
+{
+ int flags;
+ if(create)
+ { /* create from scratch */
+ unlink(base_name);
+ flags = O_RDWR | O_CREAT | O_EXCL;
+#ifdef O_DIRECT
+ if(m_use_direct_io)
+ flags |= O_DIRECT;
+#endif
+ }
+ else
+ {
+ flags = O_RDWR;
+#ifdef _LARGEFILE64_SOURCE
+ flags |= O_LARGEFILE;
+#endif
+ }
+ m_fd = file_open(base_name, flags, S_IRUSR | S_IWUSR);
+
+ if(m_fd == -1)
+ {
+ fprintf(stderr, "Can't open file %s\n", base_name);
+ return -1;
+ }
+ return 0;
+}
+
+void CFileOp::Close()
+{
+ if(!m_isopen)
+ return;
+ if(m_fd != -1)
+ {
+ if(fsync(m_fd))
+ fprintf(stderr, "Can't sync file.\n");
+ close(m_fd);
+ }
+ m_isopen = false;
+ m_fd = -1;
+}
+
+
+/*
+ * Do a typical-of-something random I/O. Any serious application that
+ * has a random I/O bottleneck is going to be smart enough to operate
+ * in a page mode, and not stupidly pull individual words out at
+ * odd offsets. To keep the cache from getting too clever, some
+ * pages must be updated. However an application that updated each of
+ * many random pages that it looked at is hard to imagine.
+ * However, it would be wrong to put the update percentage in as a
+ * parameter - the effect is too nonlinear. Need a profile
+ * of what Oracle or Ingres or some such actually does.
+ * Be warned - there is a *sharp* elbow in this curve - on a 1-MiB file,
+ * most substantial unix systems show >2000 random I/Os per second -
+ * obviously they've cached the whole thing and are just doing buffer
+ * copies.
+ */
+int
+CFileOp::doseek(unsigned int where, bool update)
+{
+ if (seek(where, SEEK_SET) == -1)
+ return -1;
+ if (read_block(PVOID(m_buf)) == -1)
+ return -1;
+
+ /* every so often, update a block */
+ if (update)
+ { /* update this block */
+
+ /* touch a byte */
+ m_buf[where % m_chunk_size]--;
+ if(seek(where, SEEK_SET) == -1)
+ return io_error("lseek in doseek update");
+ if (write_block(PVOID(m_buf)) == -1)
+ return -1;
+ if(m_sync)
+ {
+ if(fsync(m_fd))
+ {
+ fprintf(stderr, "Can't sync file.\n");
+ return -1;
+ }
+ }
+ } /* update this block */
+ return 0;
+}
+
diff --git a/bon_io.h b/bon_io.h
new file mode 100644
index 0000000..d3673d1
--- /dev/null
+++ b/bon_io.h
@@ -0,0 +1,56 @@
+#ifndef BON_FILE
+#define BON_FILE
+
+#include "bonnie.h"
+#include "thread.h"
+class Sync;
+class BonTimer;
+class Rand;
+
+class CFileOp : public Thread
+{
+public:
+ CFileOp(BonTimer &timer, int file_size, int chunk_bits, bool use_sync
+#ifdef O_DIRECT
+ , bool use_direct_io = false
+#endif
+ );
+ int Open(CPCCHAR base_name, bool create);
+ ~CFileOp();
+ int write_block_byte();
+ int write_block(PVOID buf);
+ int read_block_byte(char *buf);
+ int read_block(PVOID buf);
+ int seek(int offset, int whence);
+ int doseek(unsigned int where, bool update);
+ int seek_test(Rand &r, bool quiet, Sync &s);
+ void Close();
+ // reopen a file, bool for whether the file should be unlink()'d and creat()'d
+ int reopen(bool create);
+ BonTimer &getTimer() { return m_timer; }
+ int chunks() const { return m_total_chunks; }
+private:
+ virtual int action(PVOID param); // called for seek test
+ virtual Thread *newThread(int threadNum);
+ CFileOp(int threadNum, CFileOp *parent);
+ int m_open(CPCCHAR base_name, bool create);
+
+ BonTimer &m_timer;
+ int m_file_size;
+ FILE_TYPE m_fd;
+ bool m_isopen;
+ char *m_name;
+ bool m_sync;
+#ifdef O_DIRECT
+ bool m_use_direct_io;
+#endif
+ const int m_chunk_bits, m_chunk_size;
+ int m_total_chunks;
+ char *m_buf;
+
+ CFileOp(const CFileOp &f);
+ CFileOp & operator =(const CFileOp &f);
+};
+
+
+#endif
diff --git a/bon_suid.cpp b/bon_suid.cpp
new file mode 100644
index 0000000..50a116b
--- /dev/null
+++ b/bon_suid.cpp
@@ -0,0 +1,84 @@
+#include <pwd.h>
+#include <grp.h>
+#include <unistd.h>
+#include <stdio.h>
+#include "bonnie.h"
+
+int bon_setugid(CPCCHAR userName, CPCCHAR groupName, bool quiet)
+{
+ int id = 0;
+ uid_t userId = 0;
+ gid_t groupId = 0;
+ bool setGroup = false;
+ struct passwd *pw;
+ struct group *gr;
+ if(userName)
+ {
+ if(sscanf(userName, "%d", &id) == 1)
+ {
+ userId = uid_t(id);
+ pw = getpwuid(userId);
+ if(pw)
+ {
+ groupId = pw->pw_gid;
+ setGroup = true;
+ }
+ else
+ {
+ gr = getgrnam("nogroup");
+ if(gr)
+ {
+ groupId = gr->gr_gid;
+ setGroup = true;
+ }
+ }
+ }
+ else
+ {
+ pw = getpwnam(userName);
+ if(!pw)
+ {
+ fprintf(stderr, "Can't find user %s\n", userName);
+ return 1;
+ }
+ userId = pw->pw_uid;
+ groupId = pw->pw_gid;
+ setGroup = true;
+ }
+ }
+ if(groupName)
+ {
+ if(sscanf(groupName, "%d", &id) == 1)
+ {
+ groupId = gid_t(id);
+ setGroup = true;
+ }
+ else
+ {
+ gr = getgrnam(groupName);
+ if(!gr)
+ {
+ fprintf(stderr, "Can't find group %s\n", groupName);
+ return 1;
+ }
+ groupId = gr->gr_gid;
+ setGroup = true;
+ }
+ }
+ if(setGroup)
+ {
+ if(setgid(groupId))
+ {
+ fprintf(stderr, "Can't set gid to %d.\n", int(groupId));
+ return 1;
+ }
+ }
+ if(setuid(userId))
+ {
+ fprintf(stderr, "Can't set uid to %d.\n", int(userId));
+ return 1;
+ }
+ if(!quiet)
+ fprintf(stderr, "Using uid:%d, gid:%d.\n", int(getuid()), int(getgid()));
+ return 0;
+}
diff --git a/bon_time.cpp b/bon_time.cpp
new file mode 100644
index 0000000..5c2e87d
--- /dev/null
+++ b/bon_time.cpp
@@ -0,0 +1,423 @@
+
+#include <stdlib.h>
+
+#include "bon_time.h"
+#include "duration.h"
+#include <time.h>
+#include <string.h>
+
+// Begin timing one test: start the elapsed-time and CPU-time stopwatches
+// together so both measure the same interval.
+void BonTimer::start()
+{
+ m_dur.start();
+ m_cpu_dur.start();
+}
+// Stop both stopwatches and record the elapsed and CPU durations against
+// the given test's accumulator slot.
+void BonTimer::stop_and_record(tests_t test)
+{
+ m_delta[test].Elapsed = m_dur.stop();
+ m_delta[test].CPU = m_cpu_dur.stop();
+}
+
+// Merge one report (presumably from a concurrent worker — confirm against
+// callers) into the accumulated delta for `test`.  Elapsed time is the
+// union of the reported intervals, CPU time is summed, latency is the max.
+void BonTimer::add_delta_report(report_s &rep, tests_t test)
+{
+ // CPU == 0.0 doubles as the "no report merged yet" sentinel
+ if(m_delta[test].CPU == 0.0)
+ {
+ m_delta[test].FirstStart = rep.StartTime;
+ m_delta[test].LastStop = rep.EndTime;
+ }
+ else
+ {
+ m_delta[test].FirstStart = min(m_delta[test].FirstStart, rep.StartTime);
+ m_delta[test].LastStop = max(m_delta[test].LastStop, rep.EndTime);
+ }
+ m_delta[test].CPU += rep.CPU;
+ m_delta[test].Elapsed = m_delta[test].LastStop - m_delta[test].FirstStart;
+ m_delta[test].Latency = max(m_delta[test].Latency, rep.Latency);
+}
+
+// Construct with plain-text report format and concurrency 1, then zero
+// all per-test accumulators via Initialize().
+BonTimer::BonTimer()
+ : m_type(txt)
+ , m_concurrency(1)
+{
+ Initialize();
+}
+
+// Reset all per-test timing accumulators and the random source so the
+// timer can be reused for another full test run.
+void
+BonTimer::Initialize()
+{
+ for(int i = 0; i < TestCount; i++)
+ {
+ m_delta[i].CPU = 0.0;
+ m_delta[i].Elapsed = 0.0;
+ m_delta[i].Latency = 0.0;
+ }
+ random_source.reset();
+}
+
+// Record a latency sample (seconds) for `test`; only the worst (maximum)
+// value seen is kept.
+void
+BonTimer::add_latency(tests_t test, double t)
+{
+ m_delta[test].Latency = max(m_delta[test].Latency, t);
+}
+
+// Print the %CPU column for `test` to m_fp, in txt (space-padded) or csv
+// (comma-prefixed) form.  An untested entry (Elapsed == 0) prints blank;
+// a test that finished faster than MinTime prints "+++" because the
+// percentage could not be computed accurately.
+int BonTimer::print_cpu_stat(tests_t test)
+{
+ if(m_delta[test].Elapsed == 0.0)
+ {
+ if(m_type == txt)
+ fprintf(m_fp, " ");
+ else
+ fprintf(m_fp, ",");
+ return 0;
+ }
+ if(m_delta[test].Elapsed < MinTime)
+ {
+ if(m_type == txt)
+ fprintf(m_fp, " +++");
+ else
+ fprintf(m_fp, ",+++");
+ return 0;
+ }
+ // CPU time as a percentage of wall-clock time
+ int cpu = int(m_delta[test].CPU / m_delta[test].Elapsed * 100.0);
+ if(m_type == txt)
+ fprintf(m_fp, " %3d", cpu);
+ else
+ fprintf(m_fp, ",%d", cpu);
+ return 0;
+}
+
+// Print the work-rate column for `test` (test_size units of work divided
+// by elapsed seconds) followed by its %CPU column.  Blank if the test was
+// skipped, "+++++" if it ran faster than MinTime.  The Lseek rate gets a
+// decimal place when below 1000 (seeks/sec is a small number); all other
+// rates print as integers.
+int BonTimer::print_stat(tests_t test, int test_size)
+{
+ if(m_delta[test].Elapsed == 0.0)
+ {
+ if(m_type == txt)
+ fprintf(m_fp, " ");
+ else
+ fprintf(m_fp, ",");
+ }
+ else if(m_delta[test].Elapsed < MinTime)
+ {
+ if(m_type == txt)
+ fprintf(m_fp, " +++++");
+ else
+ fprintf(m_fp, ",+++++");
+ }
+ else
+ {
+ double stat = double(test_size) / m_delta[test].Elapsed;
+ if(test == Lseek)
+ {
+ if(m_type == txt)
+ {
+ if(stat >= 1000.0)
+ fprintf(m_fp, " %5.0f", stat);
+ else
+ fprintf(m_fp, " %5.1f", stat);
+ }
+ else
+ {
+ if(stat >= 1000.0)
+ fprintf(m_fp, ",%.0f", stat);
+ else
+ fprintf(m_fp, ",%.1f", stat);
+ }
+ }
+ else
+ {
+ if(m_type == txt)
+ fprintf(m_fp, " %5d", int(stat));
+ else
+ fprintf(m_fp, ",%d", int(stat));
+ }
+ }
+ print_cpu_stat(test); // always follow the rate with its %CPU column
+ return 0;
+}
+
+// Print the worst-case latency for `test` in a human unit chosen by
+// magnitude: seconds above ~100s, milliseconds above ~0.1s, else
+// microseconds.  Empty when no sample was recorded.  The _snprintf /
+// NO_SNPRINTF dance supports platforms without snprintf (see port.h).
+// NOTE(review): buf[10] leaves room for 9 chars; an extreme latency such
+// as >10^8 seconds would not fit — presumably impossible in practice, but
+// worth confirming against the truncating _snprintf fallback.
+int BonTimer::print_latency(tests_t test)
+{
+ char buf[10];
+ if(m_delta[test].Latency == 0.0)
+ {
+ buf[0] = '\0';
+ }
+ else
+ {
+ // thresholds are slightly below round numbers so values that would
+ // round up still print in the larger unit
+ if(m_delta[test].Latency > 99.999999)
+ _snprintf(buf
+#ifndef NO_SNPRINTF
+, sizeof(buf)
+#endif
+ , "%ds", int(m_delta[test].Latency));
+ else if(m_delta[test].Latency > 0.099999)
+ _snprintf(buf
+#ifndef NO_SNPRINTF
+, sizeof(buf)
+#endif
+ , "%dms", int(m_delta[test].Latency * 1000.0));
+ else
+ _snprintf(buf
+#ifndef NO_SNPRINTF
+, sizeof(buf)
+#endif
+ , "%dus", int(m_delta[test].Latency * 1000000.0));
+ }
+ if(m_type == txt)
+ {
+ fprintf(m_fp, " %9s", buf);
+ }
+ else
+ {
+ fprintf(m_fp, ",%s", buf);
+ }
+ return 0;
+}
+
+// Print the one-line CSV column-name header to fp.  Column order must
+// match what DoReportIO and DoReportFile emit, in that order.
+void
+BonTimer::PrintHeader(FILE *fp)
+{
+ fprintf(fp, "format_version,bonnie_version,name,concurrency,seed,file_size,io_chunk_size,putc,putc_cpu,put_block,put_block_cpu,rewrite,rewrite_cpu,getc,getc_cpu,get_block,get_block_cpu,seeks,seeks_cpu");
+ fprintf(fp, ",num_files,max_size,min_size,num_dirs,file_chunk_size,seq_create,seq_create_cpu,seq_stat,seq_stat_cpu,seq_del,seq_del_cpu,ran_create,ran_create_cpu,ran_stat,ran_stat_cpu,ran_del,ran_del_cpu");
+ fprintf(fp, ",putc_latency,put_block_latency,rewrite_latency,getc_latency,get_block_latency,seeks_latency,seq_create_latency,seq_stat_latency,seq_del_latency,ran_create_latency,ran_stat_latency,ran_del_latency");
+ fprintf(fp, "\n");
+ fflush(NULL);
+}
+
+// Format `size` into buf, scaling down by factors of 1024 while it divides
+// evenly and appending the matching unit letter from `units` (e.g. "MG"
+// means index 0 = M, index 1 = G).  A leading space in `units` marks the
+// unscaled unit; a resulting trailing space is stripped.  Caller must
+// supply a buffer large enough for the decimal digits plus unit letter.
+void print_size(char *buf, unsigned int size, CPCCHAR units)
+{
+ sprintf(buf, "%d", size);
+ int ind = 0;
+ if(units)
+ {
+ if(size == 0)
+ {
+ // zero divides by anything; just tag it with the smallest unit
+ ind = strlen(buf);
+ buf[ind] = units[0];
+ buf[ind + 1] = '\0';
+ }
+ else
+ {
+ // scale up through the unit letters while the value stays exact
+ while(size % 1024 == 0 && units[ind + 1] != '\0')
+ {
+ size /= 1024;
+ ind++;
+ }
+ sprintf(buf, "%d%c", size, units[ind]);
+ }
+ }
+ ind = strlen(buf) - 1;
+ if(buf[ind] == ' ') // drop the placeholder space used for "no unit"
+ buf[ind] = '\0';
+}
+
+// Emit the IO-test half of the report (per-char and block read/write,
+// rewrite, seeks) to fp in the current format (txt or csv).  file_size
+// and char_file_size are in MiB; file_size == 0 means the IO tests were
+// skipped, in which case csv mode emits empty placeholder columns so the
+// row still lines up with the header.
+int
+BonTimer::DoReportIO(int file_size, int char_file_size
+ , int io_chunk_size, FILE *fp)
+{
+ int i;
+ m_fp = fp;
+ const int txt_machine_size = 20; // width of the machine-name column
+ PCCHAR separator = ":";
+ if(m_type == csv)
+ separator = ",";
+ if(file_size)
+ {
+ if(m_type == txt)
+ {
+ // two-line column headers for the 80-column text report
+ fprintf(m_fp, "Version %5s ", BON_VERSION);
+ fprintf(m_fp,
+ "------Sequential Output------ --Sequential Input- --Random-\n");
+ fprintf(m_fp, "Concurrency %3d ", m_concurrency);
+ fprintf(m_fp,
+ "-Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n");
+ if(io_chunk_size == DefaultChunkSize)
+ fprintf(m_fp, "Machine Size ");
+ else
+ fprintf(m_fp, "Machine Size:chnk ");
+ fprintf(m_fp, "K/sec %%CP K/sec %%CP K/sec %%CP K/sec %%CP K/sec ");
+ fprintf(m_fp, "%%CP /sec %%CP\n");
+ }
+ // build "size[:chunk]" (txt) or "size,chunk" (csv) into size_buf
+ char size_buf[1024];
+ print_size(size_buf, file_size, "MG");
+ char *tmp = size_buf + strlen(size_buf);
+ if(io_chunk_size != DefaultChunkSize)
+ {
+ strcat(tmp, separator);
+ tmp += strlen(tmp);
+ print_size(tmp, io_chunk_size, " km");
+ }
+ else if(m_type == csv)
+ {
+ // csv always needs the chunk-size column, even when empty
+ strcat(tmp, separator);
+ tmp += strlen(tmp);
+ }
+ char buf[4096];
+ if(m_type == txt)
+ {
+ // copy machine name to buf
+ //
+ _snprintf(buf
+#ifndef NO_SNPRINTF
+, txt_machine_size - 1
+#endif
+ , "%s ", m_name);
+ buf[txt_machine_size - 1] = '\0';
+ // set cur to point to a byte past where we end the machine name
+ // size of the buf - size of the new data - 1 for the space - 1 for the
+ // terminating zero on the string
+ char *cur = &buf[txt_machine_size - strlen(size_buf) - 2];
+ *cur = ' '; // make cur a space
+ cur++; // increment to where we store the size
+ strcpy(cur, size_buf); // copy the size in
+ fputs(buf, m_fp);
+ }
+ else
+ {
+ // NOTE(review): this writes to stdout via printf while every other
+ // branch writes to m_fp — looks wrong whenever fp != stdout; confirm.
+ printf(CSV_VERSION "," BON_VERSION ",%s,%d,%s,%s", m_name
+ , m_concurrency, random_source.getSeed().c_str(), size_buf);
+ }
+ // per-char tests are sized by char_file_size, the rest by file_size
+ for(i = ByteWrite; i < Lseek; i++)
+ {
+ if(i == ByteWrite || i == ByteRead)
+ print_stat(tests_t(i), char_file_size * 1024);
+ else
+ print_stat(tests_t(i), file_size * 1024);
+ }
+ print_stat(Lseek, Seeks);
+ if(m_type == txt)
+ {
+ fprintf(m_fp, "\nLatency ");
+ for(i = ByteWrite; i <= Lseek; i++)
+ print_latency(tests_t(i));
+ fprintf(m_fp, "\n");
+ }
+ }
+ else if(m_type == csv)
+ {
+ // IO tests skipped: keep the csv row aligned with empty columns
+ fprintf(m_fp, CSV_VERSION "," BON_VERSION ",%s,%d,%s,,,,,,,,,,,,,,", m_name
+ , m_concurrency, random_source.getSeed().c_str());
+ }
+ return 0;
+}
+
+// Emit the file-creation half of the report (sequential/random create,
+// stat, delete) plus, in csv mode, the full latency column set for ALL
+// tests (IO and file).  directory_size is in multiples of 1024 files;
+// 0 means the file tests were skipped.  max_size -1/-2 are the sentinels
+// for hard-link and soft-link modes.  Always terminates the output row.
+int
+BonTimer::DoReportFile(int directory_size
+ , int max_size, int min_size, int num_directories
+ , int file_chunk_size, FILE *fp)
+{
+ PCCHAR separator = ":";
+ m_fp = fp;
+ int i;
+ if(m_type == csv)
+ separator = ",";
+ if(directory_size)
+ {
+ // build the "files[:max[:min]][/dirs]" (txt) or comma form (csv) label
+ char buf[128];
+ char *tmp;
+ sprintf(buf, "%d", directory_size);
+ tmp = &buf[strlen(buf)];
+ if(m_type == csv)
+ {
+ if(max_size == -1)
+ {
+ sprintf(tmp, ",link,");
+ }
+ else if(max_size == -2)
+ {
+ sprintf(tmp, ",symlink,");
+ }
+ else if(max_size)
+ {
+ if(min_size)
+ sprintf(tmp, ",%d,%d", max_size, min_size);
+ else
+ sprintf(tmp, ",%d,", max_size);
+ }
+ else
+ {
+ sprintf(tmp, ",,");
+ }
+ strcat(tmp, separator);
+ tmp += strlen(tmp);
+ if(file_chunk_size != DefaultChunkSize)
+ {
+ // NOTE(review): tmp already points at the NUL here, so tmp++ steps
+ // past the terminator and print_size writes text that strlen-based
+ // consumers of buf never see — looks like an off-by-one; confirm.
+ tmp++;
+ print_size(tmp, file_chunk_size, " km");
+ }
+ }
+ else
+ {
+ if(max_size == -1)
+ {
+ sprintf(tmp, ":link");
+ }
+ else if(max_size == -2)
+ {
+ sprintf(tmp, ":symlink");
+ }
+ else if(max_size)
+ {
+ sprintf(tmp, ":%d:%d", max_size, min_size);
+ }
+ }
+ tmp = &buf[strlen(buf)];
+ if(num_directories > 1)
+ {
+ if(m_type == txt)
+ sprintf(tmp, "/%d", num_directories);
+ else
+ sprintf(tmp, ",%d", num_directories);
+ }
+ else if(m_type == csv)
+ {
+ sprintf(tmp, ",");
+ }
+ if(m_type == txt)
+ {
+ // two-line column headers for the text report
+ fprintf(m_fp, "Version %5s ", BON_VERSION);
+ fprintf(m_fp,
+ "------Sequential Create------ --------Random Create--------\n");
+ fprintf(m_fp, "%-19.19s ", m_name);
+ fprintf(m_fp,
+ "-Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n");
+ if(min_size)
+ {
+ fprintf(m_fp, "files:max:min ");
+ }
+ else
+ {
+ if(max_size)
+ fprintf(m_fp, "files:max ");
+ else
+ fprintf(m_fp, " files ");
+ }
+ fprintf(m_fp, " /sec %%CP /sec %%CP /sec %%CP /sec %%CP /sec ");
+ fprintf(m_fp, "%%CP /sec %%CP\n");
+ fprintf(m_fp, "%19s", buf);
+ }
+ else
+ {
+ fprintf(m_fp, ",%s", buf);
+ }
+ for(i = CreateSeq; i < TestCount; i++)
+ print_stat(tests_t(i), directory_size * DirectoryUnit);
+ if(m_type == txt)
+ {
+ fprintf(m_fp, "\nLatency ");
+ for(i = CreateSeq; i < TestCount; i++)
+ print_latency(tests_t(i));
+ }
+ }
+ else if(m_type == csv)
+ {
+ // file tests skipped: empty placeholder columns keep the row aligned
+ fprintf(m_fp, ",,,,,,,,,,,,,,,,,");
+ }
+ if(m_type == csv)
+ {
+ // csv rows end with the latency columns for every test
+ for(i = ByteWrite; i < TestCount; i++)
+ print_latency(tests_t(i));
+ }
+ fprintf(m_fp, "\n");
+ fflush(NULL);
+ return 0;
+}
+
diff --git a/bon_time.h b/bon_time.h
new file mode 100644
index 0000000..0a21426
--- /dev/null
+++ b/bon_time.h
@@ -0,0 +1,73 @@
+#ifndef BON_TIME_H
+#define BON_TIME_H
+
+#include "bonnie.h"
+#include "duration.h"
+#include "rand.h"
+
+// One timing report for a single test: CPU seconds consumed, wall-clock
+// start/end, and the worst single-operation latency observed.
+struct report_s
+{
+ double CPU;
+ double StartTime;
+ double EndTime;
+ double Latency;
+};
+
+// Accumulated results for one test across reports: summed CPU time,
+// elapsed wall-clock span (LastStop - FirstStart), the earliest start and
+// latest stop merged so far, and the maximum latency.
+struct delta_s
+{
+ double CPU;
+ double Elapsed;
+ double FirstStart;
+ double LastStop;
+ double Latency;
+};
+
+// Collects per-test timing/latency results and renders the final report
+// in plain-text or CSV form.  One instance lives for the whole run; call
+// Initialize() between repeated test runs.  Non-copyable.
+class BonTimer
+{
+public:
+ enum RepType { csv, txt }; // output format for the Do* report methods
+
+ BonTimer();
+
+ // stopwatch control around each individual test
+ void start();
+ void stop_and_record(tests_t test);
+ // merge a report_s from elsewhere (e.g. another process) into a test slot
+ void add_delta_report(report_s &rep, tests_t test);
+ // render the IO-test and file-test halves of the report to fp
+ int DoReportIO(int file_size, int char_file_size
+ , int io_chunk_size, FILE *fp);
+ int DoReportFile(int directory_size
+ , int max_size, int min_size, int num_directories
+ , int file_chunk_size, FILE *fp);
+ void SetType(RepType type) { m_type = type; }
+ double cpu_so_far();
+ double time_so_far();
+ // print the CSV column-name header line
+ void PrintHeader(FILE *fp);
+ // reset all accumulators for a fresh run
+ void Initialize();
+ static double get_cur_time();
+ static double get_cpu_use();
+
+ // record a latency sample; only the per-test maximum is kept
+ void add_latency(tests_t test, double t);
+
+ void setMachineName(CPCCHAR name) { m_name = name; }
+
+ void setConcurrency(int con) { m_concurrency = con; }
+
+ Rand random_source; // shared RNG; its seed is echoed into CSV output
+
+private:
+ // column printers used by the Do* report methods
+ int print_cpu_stat(tests_t test);
+ int print_stat(tests_t test, int test_size);
+ int print_latency(tests_t test);
+
+ delta_s m_delta[TestCount]; // accumulated results, one slot per test
+ RepType m_type;
+ FILE *m_fp; // destination of the report being printed
+ Duration m_dur; // wall-clock stopwatch
+ CPU_Duration m_cpu_dur; // CPU-time stopwatch
+ PCCHAR m_name; // machine name shown in reports
+ int m_concurrency;
+
+ // copying is not supported
+ BonTimer(const BonTimer&);
+ BonTimer &operator=(const BonTimer&);
+};
+
+#endif
diff --git a/bonnie++.8 b/bonnie++.8
new file mode 100644
index 0000000..aed3a6e
--- /dev/null
+++ b/bonnie++.8
@@ -0,0 +1,239 @@
+.TH bonnie++ 8
+.SH "NAME"
+bonnie++ \- program to test hard drive performance.
+
+.SH "SYNOPSIS"
+.B bonnie++
+.I [\-d dir] [\-s size(MiB)[:chunk\-size(b)]]
+.I [\-n number\-to\-stat(*1024)[:max\-size[:min\-size][:num\-directories[:chunk\-size]]]]
+.I [\-m machine\-name] [\-r ram\-size\-in\-MiB] [\-x number\-of\-tests]
+.I [\-u uid\-to\-use:gid\-to\-use] [\-g gid\-to\-use]
+.I [\-q] [\-f size\-for\-char\-io] [\-b] [\-D] [\-p processes | \-y p|s ]
+.I [\-z seed\-num|\-Z random\-file]
+
+.SH "DESCRIPTION"
+This manual page documents briefly the
+.BR bonnie++ ,
+program.
+.P
+Bonnie++ is a program to test hard drives and file systems for performance or
+the lack thereof. There are many different types of file system operations
+which different applications use to different degrees. Bonnie++ tests some of
+them and for each test gives a result of the amount of work done per second
+and the percentage of CPU time this took. For performance results higher
+numbers are better, for CPU usage lower are better (NB a configuration scoring
+a performance result of 2000 and a CPU result of 90% is better in terms of CPU
+use than a configuration delivering performance of 1000 and CPU usage of 60%).
+.P
+There are two sections to the program's operations. The first is to test the
+IO throughput in a fashion that is designed to simulate some types of database
+applications. The second is to test creation, reading, and deleting many small
+files in a fashion similar to the usage patterns of programs such as Squid or
+INN.
+.P
+All the details of the tests performed by Bonnie++ are contained in the file
+.BR /usr/share/doc/bonnie++/readme.html
+
+.SH "OPTIONS"
+For Bonnie++ every option is of the form of a hyphen followed by a letter and
+then the next parameter contains the value.
+.TP
+.B \-d
+the directory to use for the tests.
+.TP
+.B \-s
+the size of the file(s) for IO performance measures in megabytes. If the size
+is greater than 1G then multiple files will be used to store the data, and
+each file will be up to 1G in size. This parameter may include the chunk size
+separated from the size by a colon. The chunk\-size is measured in bytes and
+must be a power of two from 256 to 1048576, the default is 8192. NB You can
+specify the size in giga\-bytes or the chunk\-size in kilo\-bytes if you add
+\f3g\f1 or \f3k\f1 to the end of the number respectively.
+
+If the specified size is 0 then this test will be skipped.
+.TP
+.B \-n
+the number of files for the file creation test. This is measured in multiples
+of 1024 files. This is because no\-one will want to test less than 1024 files,
+and we need the extra space on braille displays.
+
+If the specified number is 0 then this test will be skipped.
+
+The default for this test is to test with 0 byte files. To use files of other
+sizes you can specify
+.B number:max:min:num\-directories:chunk\-size
+where
+.B max
+is the maximum size and
+.B min
+is the minimum size (both default to 0 if not specified). If minimum and
+maximum sizes are specified then every file will have a random size from the
+range
+.B min..max
+inclusive. If you specify a number of directories then the files will be
+evenly distributed among that many sub\-directories.
+
+If
+.B max
+is
+.B \-1
+then hard links will be created instead of files.
+If
+.B max
+is
+.B \-2
+then soft links will be created instead of files.
+
+
+.TP
+.B \-m
+name of the machine \- for display purposes only.
+
+.TP
+.B \-r
+RAM size in megabytes. If you specify this the other parameters will be
+checked to ensure they make sense for a machine of that much RAM. You should
+not need to do this in general use as it should be able to discover the RAM
+size. NB If you specify a size of 0 then all checks will be disabled...
+
+.TP
+.B \-x
+number of test runs. This is useful if you want to perform more than one test.
+It will dump output continuously in CSV format until either the number of
+tests have been completed, or it is killed.
+
+.TP
+.B \-u
+user\-id to use. When running as root specify the UID to use for the tests. It
+is not recommended to use root (since the occasion when a Bonnie++ bug wiped
+out someone's system), so if you really want to run as root then use
+.B \-u root.
+Also if you want to specify the group to run as then use the
+.B user:group
+format. If you specify a user by name but no group then the primary group of
+that user will be chosen. If you specify a user by number and no group then
+the group will be
+.B nogroup.
+
+.TP
+.B \-g
+group\-id to use. Same as using
+.B :group
+for the
+.B \-u
+parameter, just a different way to specify it for compatibility with other
+programs.
+
+.TP
+.B \-q
+quiet mode. If specified then some of the extra informational messages will be
+suppressed. Also the csv data will be the only output on standard out and the
+plain text data will be on standard error. This means you can run
+.B bonnie++ \-q >> file.csv
+to record your csv data.
+
+.TP
+.B \-f size\-for\-char\-io
+fast mode control, skips per\-char IO tests if no parameter, otherwise specifies
+the size of the tests for per\-char IO tests (default 20M).
+
+.TP
+.B \-b
+no write buffering. fsync() after every write.
+
+.TP
+.B \-p
+number of processes to serve semaphores for. This is used to create the
+semaphores for synchronising multiple Bonnie++ processes. All the processes
+which are told to use the semaphore with
+.B \-ys
+will start each test with synchronization. Use "\-p \-1" to delete the semaphore.
+
+.TP
+.B \-y s|p
+perform synchronization before each test. Option
+.B s
+for semaphores and option
+.B p
+for prompting.
+
+.TP
+.B \-D
+use direct IO (O_DIRECT) for the bulk IO tests
+
+.TP
+.B \-z seed
+random number seed to repeat the same test.
+
+.TP
+.B \-Z random\-file
+file containing random data in network byte order.
+.P
+
+.SH "MULTIPLE PROCESSES"
+Run the following commands to run three copies of Bonnie++ simultaneously:
+.P
+bonnie++ -p3
+.P
+bonnie++ -y > out1 &
+.P
+bonnie++ -y > out2 &
+.P
+bonnie++ -y > out3 &
+
+.SH "OUTPUT"
+The primary output is plain\-text in 80 columns which is designed to fit well
+when pasted into email and which will work well with Braille displays.
+.P
+The second type of output is CSV (Comma Separated Values). This can easily be
+imported into any spread\-sheet or database program. Also I have included
+the programs
+.B bon_csv2html
+and
+.B bon_csv2txt
+to convert CSV data to HTML and plain\-ascii respectively.
+.P
+For every test two numbers are reported, the amount of work done (higher
+numbers are better) and the percentage of CPU time taken to perform the work
+(lower numbers are better). If a test completes in less than 500ms then
+the output will be displayed as "++++". This is because such a test result
+can't be calculated accurately due to rounding errors and I would rather
+display no result than a wrong result.
+.P
+Data volumes for the 80 column text display use "K" for KiB (1024 bytes), "M"
+for MiB (1048576 bytes), and "G" for GiB (1073741824 bytes). So K/sec means
+a multiple of 1024 bytes per second.
+
+.SH "AUTHOR"
+This program, its manual page, and the Debian package were written by
+Russell Coker <russell@coker.com.au>, parts of the program are based on the
+work of Tim Bray <tbray@textuality.com>.
+.P
+The documentation, the Perl scripts, and all the code for testing the creation
+of thousands of files was written by Russell Coker, but the entire package is
+under joint copyright with Tim Bray.
+
+.SH "SIGNALS"
+Handles SIGINT and does a cleanup (which may take some time), a second SIGINT
+or a SIGQUIT will cause it to immediately die.
+.P
+SIGXCPU and SIGXFSZ act like SIGINT.
+.P
+Ignores SIGHUP.
+
+
+.SH "BUGS"
+The random file sizes will add up to different values for different test runs.
+I plan to add some code that checks the sum and ensures that the sum of the
+values will be the same on separate runs.
+
+.SH "AVAILABILITY"
+The source is available from http://www.coker.com.au/bonnie++ .
+.P
+See http://etbe.coker.com.au/category/benchmark for further information.
+
+.SH "SEE ALSO"
+.BR zcav (8),
+.BR getc_putc (8),
+.BR bon_csv2html (1),
+.BR bon_csv2txt (1)
diff --git a/bonnie++.cpp b/bonnie++.cpp
new file mode 100644
index 0000000..8c5a43a
--- /dev/null
+++ b/bonnie++.cpp
@@ -0,0 +1,809 @@
+
+/*
+ * COPYRIGHT NOTICE:
+ * Copyright (c) Tim Bray, 1990.
+ * Copyright (c) Russell Coker, 1999. I have updated the program, added
+ * support for >2G on 32bit machines, and tests for file creation.
+ * Licensed under the GPL version 2.0.
+ * DISCLAIMER:
+ * This program is provided AS IS with no warranty of any kind, and
+ * The author makes no representation with respect to the adequacy of this
+ * program for any particular purpose or with respect to its adequacy to
+ * produce any particular result, and
+ * The author shall not be liable for loss or damage arising out of
+ * the use of this program regardless of how sustained, and
+ * In no event shall the author be liable for special, direct, indirect
+ * or consequential damage, loss, costs or fees or expenses of any
+ * nature or kind.
+ */
+
+#include "bonnie.h"
+
+#include <stdlib.h>
+
+#include "conf.h"
+#ifdef HAVE_ALGORITHM
+#include <algorithm>
+#else
+#ifdef HAVE_ALGO
+#include <algo>
+#else
+#include <algo.h>
+#endif
+#endif
+
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <pwd.h>
+#include <grp.h>
+#include <sys/utsname.h>
+#include "sync.h"
+
+#include <time.h>
+#include "bon_io.h"
+#include "bon_file.h"
+#include "bon_time.h"
+#include "rand.h"
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+
+void usage();
+
+// Bag of run-wide state shared by main() and the test drivers: option
+// flags, the BonTimer, the synchronisation object, the per-run file name,
+// and the page-aligned IO buffer.  Non-copyable.
+class CGlobalItems
+{
+public:
+ bool quiet; // -q: keep text report off stdout
+ int byte_io_size; // -f: MiB used for the per-char IO tests
+ bool sync_bonnie; // -y: synchronise with other bonnie++ procs
+#ifdef O_DIRECT
+ bool use_direct_io; // -D: open data files with O_DIRECT
+#endif
+ BonTimer timer;
+ int ram; // -r / detected: machine RAM in MiB
+ Sync *syn; // synchronisation helper (owned)
+ char *name; // base path of the test file(s) (owned)
+ bool bufSync; // -b: fsync() after every write
+ int io_chunk_bits; // log2 of io_chunk_size()
+ int file_chunk_bits; // log2 of file_chunk_size()
+ int io_chunk_size() const { return m_io_chunk_size; }
+ int file_chunk_size() const { return m_file_chunk_size; }
+ bool *doExit; // set asynchronously by the signal handler
+ // replace the buffer with a page-aligned one of the new size
+ void set_io_chunk_size(int size)
+ { delete m_buf; pa_new(size, m_buf, m_buf_pa); m_io_chunk_size = size; }
+ // NOTE(review): unlike set_io_chunk_size this does not refresh m_buf_pa,
+ // leaving buf() pointing into the freed allocation — confirm intent.
+ void set_file_chunk_size(int size)
+ { delete m_buf; m_buf = new char[__max(size, m_io_chunk_size)]; m_file_chunk_size = size; }
+
+ // Return the page-aligned version of the local buffer
+ char *buf() { return m_buf_pa; }
+
+ CGlobalItems(bool *exitFlag);
+ // NOTE(review): name and m_buf come from new[]; plain delete here (and in
+ // the set_*_chunk_size methods) should presumably be delete[] — confirm.
+ ~CGlobalItems() { delete name; delete m_buf; delete syn; }
+
+ void decrement_and_wait(int nr_sem);
+
+ // set the per-run file name to "<path>/Bonnie.<pid>"
+ void SetName(CPCCHAR path)
+ {
+ delete name;
+ name = new char[strlen(path) + 15];
+ pid_t myPid = getpid();
+ sprintf(name, "%s/Bonnie.%d", path, int(myPid));
+ }
+
+ // choose the synchronisation mode (none / semaphore / prompt)
+ void setSync(SYNC_TYPE type, int semKey = 0, int num_tests = 0)
+ {
+ syn = new Sync(type, semKey, num_tests);
+ }
+
+private:
+ int m_io_chunk_size;
+ int m_file_chunk_size;
+
+ char *m_buf; // Pointer to the entire buffer
+ char *m_buf_pa; // Pointer to the page-aligned version of the same buffer
+
+ // Implement a page-aligned version of new.
+ // 'p' is the pointer created
+ // 'page_aligned_p' is the page-aligned pointer created
+ void pa_new(unsigned int num_bytes, char *&p, char *&page_aligned_p)
+ {
+ int page_size = getpagesize();
+ p = ::new char [num_bytes + page_size];
+
+ page_aligned_p = (char *)((((unsigned long)p + page_size - 1) / page_size) * page_size);
+ }
+
+ // copying is not supported
+ CGlobalItems(const CGlobalItems &f);
+ CGlobalItems & operator =(const CGlobalItems &f);
+};
+
+// Initialise every option to its default, allocate the page-aligned IO
+// buffer sized for the larger of the two default chunk sizes, and default
+// the test-file location to the current directory.
+CGlobalItems::CGlobalItems(bool *exitFlag)
+ : quiet(false)
+ , byte_io_size(DefaultByteIO)
+ , sync_bonnie(false)
+#ifdef O_DIRECT
+ , use_direct_io(false)
+#endif
+ , timer()
+ , ram(0)
+ , syn(NULL)
+ , name(NULL)
+ , bufSync(false)
+ , io_chunk_bits(DefaultChunkBits)
+ , file_chunk_bits(DefaultChunkBits)
+ , doExit(exitFlag)
+ , m_io_chunk_size(DefaultChunkSize)
+ , m_file_chunk_size(DefaultChunkSize)
+ , m_buf(NULL)
+ , m_buf_pa(NULL)
+{
+ pa_new(__max(m_io_chunk_size, m_file_chunk_size), m_buf, m_buf_pa);
+ SetName(".");
+}
+
+// Block on the shared semaphore until all synchronised bonnie++ processes
+// reach test nr_sem; a failure from the Sync object aborts the program.
+void CGlobalItems::decrement_and_wait(int nr_sem)
+{
+ if(syn->decrement_and_wait(nr_sem))
+ exit(1);
+}
+
+int TestDirOps(int directory_size, int max_size, int min_size
+ , int num_directories, CGlobalItems &globals);
+int TestFileOps(int file_size, CGlobalItems &globals);
+
+static bool exitNow;
+static bool already_printed_error;
+
+// Signal handler for SIGINT/SIGXCPU/SIGXFSZ (installed with SA_RESETHAND,
+// so a second signal kills the process outright).  Only sets the exitNow
+// flag; the test loops poll it and clean up at a safe point.
+extern "C"
+{
+ void ctrl_c_handler(int sig, siginfo_t *siginf, void *unused)
+ {
+ if(siginf->si_signo == SIGXCPU)
+ fprintf(stderr, "Exceeded CPU usage.\n");
+ else if(siginf->si_signo == SIGXFSZ)
+ fprintf(stderr, "exceeded file storage limits.\n");
+ exitNow = true;
+ }
+}
+
+// Program entry point: install signal handlers, detect RAM, parse options,
+// validate the resulting configuration, optionally drop privileges and set
+// up inter-process synchronisation, then run the IO and file-creation test
+// suites -x times (or once), printing text and/or CSV reports.
+int main(int argc, char *argv[])
+{
+ int file_size = DefaultFileSize;
+ int directory_size = DefaultDirectorySize;
+ int directory_max_size = DefaultDirectoryMaxSize;
+ int directory_min_size = DefaultDirectoryMinSize;
+ int num_bonnie_procs = 0;
+ int num_directories = 1;
+ int test_count = -1; // -1 == run forever / single default run
+ const char * machine = NULL;
+ char *userName = NULL, *groupName = NULL;
+ CGlobalItems globals(&exitNow);
+ bool setSize = false;
+
+ exitNow = false;
+ already_printed_error = false;
+
+ // SIGINT/SIGXCPU/SIGXFSZ request a clean shutdown via exitNow;
+ // SA_RESETHAND means a repeat signal terminates immediately.
+ struct sigaction sa;
+ sa.sa_sigaction = &ctrl_c_handler;
+ sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
+ if(sigaction(SIGINT, &sa, NULL)
+ || sigaction(SIGXCPU, &sa, NULL)
+ || sigaction(SIGXFSZ, &sa, NULL))
+ {
+ printf("Can't handle SIGINT.\n");
+ return 1;
+ }
+ sa.sa_handler = SIG_IGN; // survive terminal hangups
+ if(sigaction(SIGHUP, &sa, NULL))
+ {
+ printf("Can't handle SIGHUP.\n");
+ return 1;
+ }
+
+#ifdef _SC_PHYS_PAGES
+ // auto-detect physical RAM (MiB) for the sanity checks below
+ int page_size = sysconf(_SC_PAGESIZE);
+ int num_pages = sysconf(_SC_PHYS_PAGES);
+ if(page_size != -1 && num_pages != -1)
+ {
+ globals.ram = page_size/1024 * (num_pages/1024);
+ }
+#endif
+
+ // seed the RNG from pid ^ time so concurrent runs differ
+ pid_t myPid = 0;
+ myPid = getpid();
+ globals.timer.random_source.seedNum(myPid ^ time(NULL));
+ int concurrency = 1;
+
+ // ---- option parsing (see bonnie++.8 for the meaning of each flag) ----
+ int int_c;
+ while(-1 != (int_c = getopt(argc, argv, "bc:d:f::g:l:m:n:p:qr:s:u:x:y:z:Z:"
+#ifdef O_DIRECT
+ "D"
+#endif
+ )) )
+ {
+ switch(char(int_c))
+ {
+ case '?':
+ case ':':
+ usage();
+ break;
+ case 'b':
+ globals.bufSync = true;
+ break;
+ case 'c':
+ concurrency = atoi(optarg);
+ break;
+ case 'd':
+ if(chdir(optarg))
+ {
+ fprintf(stderr, "Can't change to directory \"%s\".\n", optarg);
+ usage();
+ }
+ break;
+ case 'f':
+ if(optarg)
+ globals.byte_io_size = atoi(optarg);
+ else
+ globals.byte_io_size = 0; // bare -f skips per-char tests
+ break;
+ case 'm':
+ machine = optarg;
+ break;
+ case 'n':
+ {
+ // -n number[:max[:min[:dirs[:chunk]]]] — colon-separated fields
+ char *sbuf = _strdup(optarg);
+ char *size = strtok(sbuf, ":");
+ directory_size = size_from_str(size, "m");
+ size = strtok(NULL, ":");
+ if(size)
+ {
+ directory_max_size = size_from_str(size, "kmg");
+ size = strtok(NULL, ":");
+ if(size)
+ {
+ directory_min_size = size_from_str(size, "kmg");
+ size = strtok(NULL, ":");
+ if(size)
+ {
+ num_directories = size_from_str(size, "k");
+ size = strtok(NULL, "");
+ if(size)
+ {
+ int tmp = size_from_str(size, "kmg");
+ globals.set_file_chunk_size(tmp);
+ }
+ }
+ }
+ }
+ free(sbuf);
+ }
+ break;
+ case 'p':
+ num_bonnie_procs = atoi(optarg);
+ /* Set semaphore to # of bonnie++ procs
+ to synchronize */
+ break;
+ case 'q':
+ globals.quiet = true;
+ break;
+ case 'r':
+ globals.ram = atoi(optarg);
+ break;
+ case 's':
+ {
+ // -s size[:chunk] — file size (MiB unless suffixed) and IO chunk
+ char *sbuf = _strdup(optarg);
+ char *size = strtok(sbuf, ":");
+#ifdef _LARGEFILE64_SOURCE
+ file_size = size_from_str(size, "gt");
+#else
+ file_size = size_from_str(size, "g");
+#endif
+ size = strtok(NULL, "");
+ if(size)
+ {
+ int tmp = size_from_str(size, "k");
+ globals.set_io_chunk_size(tmp);
+ }
+ setSize = true; // suppress the RAM-based auto-sizing below
+ free(sbuf);
+ }
+ break;
+ case 'g':
+ if(groupName)
+ usage();
+ groupName = optarg;
+ break;
+ case 'u':
+ {
+ // -u user[:group] — split on the first colon
+ if(userName)
+ usage();
+ userName = _strdup(optarg);
+ int i;
+ for(i = 0; userName[i] && userName[i] != ':'; i++) {}
+
+ if(userName[i] == ':')
+ {
+ if(groupName)
+ usage();
+ userName[i] = '\0';
+ groupName = &userName[i + 1];
+ }
+ }
+ break;
+ case 'x':
+ test_count = atoi(optarg);
+ break;
+ case 'y':
+ /* tell procs to synchronize via previous
+ defined semaphore */
+ switch(tolower(optarg[0]))
+ {
+ case 's':
+ globals.setSync(eSem, SemKey, TestCount);
+ break;
+ case 'p':
+ globals.setSync(ePrompt);
+ break;
+ }
+ globals.sync_bonnie = true;
+ break;
+ case 'z':
+ {
+ UINT tmp;
+ if(sscanf(optarg, "%u", &tmp) == 1)
+ globals.timer.random_source.seedNum(tmp);
+ }
+ break;
+#ifdef O_DIRECT
+ case 'D':
+ /* open file descriptor with direct I/O */
+ globals.use_direct_io = true;
+ break;
+#endif
+ case 'Z':
+ {
+ if(globals.timer.random_source.seedFile(optarg))
+ return eParam;
+ }
+ break;
+ }
+ }
+ // ---- validate the combined option set ----
+ if(concurrency < 1 || concurrency > 200)
+ usage();
+ if(!globals.syn)
+ globals.setSync(eNone);
+ if(optind < argc) // stray non-option arguments
+ usage();
+
+ // with known RAM and no explicit -s, size the file at 2x RAM rounded
+ // up to the nearest GiB so caching can't satisfy the reads
+ if(globals.ram && !setSize)
+ {
+ if(file_size < (globals.ram * 2))
+ file_size = globals.ram * 2;
+ // round up to the nearest gig
+ if(file_size % 1024 > 512)
+ file_size = file_size + 1024 - (file_size % 1024);
+ }
+#ifndef _LARGEFILE64_SOURCE
+ // without LFS a single file is limited to just under 2GiB
+ if(file_size == 2048)
+ file_size = 2047;
+ if(file_size > 2048)
+ {
+ fprintf(stderr, "Large File Support not present, can't do %dM.\n", file_size);
+ usage();
+ }
+#endif
+ // clamp the per-char test size into [0, file_size]
+ globals.byte_io_size = __min(file_size, globals.byte_io_size);
+ globals.byte_io_size = __max(0, globals.byte_io_size);
+
+ if(machine == NULL)
+ {
+ // default the report's machine name to the hostname
+ struct utsname utsBuf;
+ if(uname(&utsBuf) != -1)
+ machine = utsBuf.nodename;
+ }
+
+ globals.timer.setMachineName(machine);
+ globals.timer.setConcurrency(concurrency);
+
+ // drop privileges if requested; refuse to run as plain root
+ if(userName || groupName)
+ {
+ if(bon_setugid(userName, groupName, globals.quiet))
+ return 1;
+ if(userName)
+ free(userName);
+ }
+ else if(geteuid() == 0)
+ {
+ fprintf(stderr, "You must use the \"-u\" switch when running as root.\n");
+ usage();
+ }
+
+ // -p (create/delete the semaphore) and -y (use it) are exclusive
+ if(num_bonnie_procs && globals.sync_bonnie)
+ usage();
+
+ if(num_bonnie_procs)
+ {
+ globals.setSync(eSem, SemKey, TestCount);
+ if(num_bonnie_procs == -1)
+ {
+ return globals.syn->clear_sem(); // -p -1 deletes the semaphore
+ }
+ else
+ {
+ return globals.syn->create(num_bonnie_procs);
+ }
+ }
+
+ if(globals.sync_bonnie)
+ {
+ if(globals.syn->get_semid())
+ return 1;
+ }
+
+ if(file_size < 0 || directory_size < 0 || (!file_size && !directory_size) )
+ usage();
+ // chunk sizes must lie in [256, Unit] and be powers of two
+ if(globals.io_chunk_size() < 256 || globals.io_chunk_size() > Unit)
+ usage();
+ if(globals.file_chunk_size() < 256 || globals.file_chunk_size() > Unit)
+ usage();
+ int i;
+ globals.io_chunk_bits = 0;
+ globals.file_chunk_bits = 0;
+ // derive log2 of each chunk size; a remainder means not a power of two
+ for(i = globals.io_chunk_size(); i > 1; i = i >> 1, globals.io_chunk_bits++)
+ {}
+
+ if(1 << globals.io_chunk_bits != globals.io_chunk_size())
+ usage();
+ for(i = globals.file_chunk_size(); i > 1; i = i >> 1, globals.file_chunk_bits++)
+ {}
+
+ if(1 << globals.file_chunk_bits != globals.file_chunk_size())
+ usage();
+
+ // -1/-2 are the link/symlink sentinels; otherwise min <= max required
+ if( (directory_max_size != -1 && directory_max_size != -2)
+ && (directory_max_size < directory_min_size || directory_max_size < 0
+ || directory_min_size < 0) )
+ usage();
+#ifndef _LARGEFILE64_SOURCE
+ if(file_size > (1 << (31 - 20 + globals.io_chunk_bits)) )
+ {
+ fprintf(stderr
+ , "The small chunk size and large IO size make this test impossible in 32bit.\n");
+ usage();
+ }
+#endif
+ if(file_size && globals.ram && (file_size * concurrency) < (globals.ram * 2) )
+ {
+ fprintf(stderr
+ , "File size should be double RAM for good results, RAM is %dM.\n"
+ , globals.ram);
+ usage();
+ }
+
+ // if doing more than one test run then we print a header before the
+ // csv format output.
+ if(test_count > 1)
+ {
+ globals.timer.SetType(BonTimer::csv);
+ globals.timer.PrintHeader(stdout);
+ }
+ // ---- main test loop: run the suites test_count times (-1 = once) ----
+ for(; test_count > 0 || test_count == -1; test_count--)
+ {
+ globals.timer.Initialize();
+ int rc;
+ rc = TestFileOps(file_size, globals);
+ if(rc) return rc;
+ rc = TestDirOps(directory_size, directory_max_size, directory_min_size
+ , num_directories, globals);
+ if(rc) return rc;
+ // if we are only doing one test run then print a plain-text version of
+ // the results before printing a csv version.
+ if(test_count == -1)
+ {
+ globals.timer.SetType(BonTimer::txt);
+ rc = globals.timer.DoReportIO(file_size, globals.byte_io_size
+ , globals.io_chunk_size(), globals.quiet ? stderr : stdout);
+ rc |= globals.timer.DoReportFile(directory_size
+ , directory_max_size, directory_min_size, num_directories
+ , globals.file_chunk_size()
+ , globals.quiet ? stderr : stdout);
+ }
+ // print a csv version in every case
+ globals.timer.SetType(BonTimer::csv);
+ rc = globals.timer.DoReportIO(file_size, globals.byte_io_size
+ , globals.io_chunk_size(), stdout);
+ rc |= globals.timer.DoReportFile(directory_size
+ , directory_max_size, directory_min_size, num_directories
+ , globals.file_chunk_size(), stdout);
+ if(rc) return rc;
+ }
+ return eNoErr;
+}
+
+int
+TestFileOps(int file_size, CGlobalItems &globals)
+{
+ if(file_size)
+ {
+ CFileOp file(globals.timer, file_size, globals.io_chunk_bits, globals.bufSync
+#ifdef O_DIRECT
+ , globals.use_direct_io
+#endif
+ );
+ int num_chunks;
+ int words;
+ char *buf = globals.buf();
+ int bufindex;
+ int i;
+
+ // default is we have 1M / 8K * 300 chunks = 38400
+ num_chunks = Unit / globals.io_chunk_size() * file_size;
+ int char_io_chunks = Unit / globals.io_chunk_size() * globals.byte_io_size;
+
+ int rc;
+ rc = file.Open(globals.name, true);
+ if(rc)
+ return rc;
+ if(exitNow)
+ return eCtrl_C;
+ Duration dur;
+
+ globals.timer.start();
+ if(char_io_chunks)
+ {
+ dur.reset();
+ globals.decrement_and_wait(ByteWrite);
+ // Fill up a file, writing it a char at a time
+ if(!globals.quiet) fprintf(stderr, "Writing a byte at a time...");
+ for(words = 0; words < char_io_chunks; words++)
+ {
+ dur.start();
+ if(file.write_block_byte() == -1)
+ return 1;
+ dur.stop();
+ if(exitNow)
+ return eCtrl_C;
+ }
+ fflush(NULL);
+ /*
+ * note that we always close the file before measuring time, in an
+ * effort to force as much of the I/O out as we can
+ */
+ file.Close();
+ globals.timer.stop_and_record(ByteWrite);
+ globals.timer.add_latency(ByteWrite, dur.getMax());
+ if(!globals.quiet) fprintf(stderr, "done\n");
+ }
+ /* Write the whole file from scratch, again, with block I/O */
+ if(file.reopen(true))
+ return 1;
+ dur.reset();
+ globals.decrement_and_wait(FastWrite);
+ if(!globals.quiet) fprintf(stderr, "Writing intelligently...");
+ memset(buf, 0, globals.io_chunk_size());
+ globals.timer.start();
+ bufindex = 0;
+ // for the number of chunks of file data
+ for(i = 0; i < num_chunks; i++)
+ {
+ if(exitNow)
+ return eCtrl_C;
+ // for each chunk in the Unit
+ buf[bufindex]++;
+ bufindex = (bufindex + 1) % globals.io_chunk_size();
+ dur.start();
+ if(file.write_block(PVOID(buf)) == -1)
+ {
+ fprintf(stderr, "Can't write block %d.\n", i);
+ return 1;
+ }
+ dur.stop();
+ }
+ file.Close();
+ globals.timer.stop_and_record(FastWrite);
+ globals.timer.add_latency(FastWrite, dur.getMax());
+ if(!globals.quiet) fprintf(stderr, "done\n");
+
+
+ /* Now read & rewrite it using block I/O. Dirty one word in each block */
+ if(file.reopen(false))
+ return 1;
+ if (file.seek(0, SEEK_SET) == -1)
+ {
+ if(!globals.quiet) fprintf(stderr, "error in lseek(2) before rewrite\n");
+ return 1;
+ }
+ dur.reset();
+ globals.decrement_and_wait(ReWrite);
+ if(!globals.quiet) fprintf(stderr, "Rewriting...");
+ globals.timer.start();
+ bufindex = 0;
+ for(words = 0; words < num_chunks; words++)
+ { // for each chunk in the file
+ dur.start();
+ if (file.read_block(PVOID(buf)) == -1)
+ return 1;
+ bufindex = bufindex % globals.io_chunk_size();
+ buf[bufindex]++;
+ bufindex++;
+ if (file.seek(-1, SEEK_CUR) == -1)
+ return 1;
+ if (file.write_block(PVOID(buf)) == -1)
+ return io_error("re write(2)");
+ dur.stop();
+ if(exitNow)
+ return eCtrl_C;
+ }
+ file.Close();
+ globals.timer.stop_and_record(ReWrite);
+ globals.timer.add_latency(ReWrite, dur.getMax());
+ if(!globals.quiet) fprintf(stderr, "done\n");
+
+ if(char_io_chunks)
+ {
+ // read them all back a byte at a time
+ if(file.reopen(false))
+ return 1;
+ dur.reset();
+ globals.decrement_and_wait(ByteRead);
+ if(!globals.quiet) fprintf(stderr, "Reading a byte at a time...");
+ globals.timer.start();
+
+ for(words = 0; words < char_io_chunks; words++)
+ {
+ dur.start();
+ if(file.read_block_byte(buf) == -1)
+ return 1;
+ dur.stop();
+ if(exitNow)
+ return eCtrl_C;
+ }
+
+ file.Close();
+ globals.timer.stop_and_record(ByteRead);
+ globals.timer.add_latency(ByteRead, dur.getMax());
+ if(!globals.quiet) fprintf(stderr, "done\n");
+ }
+
+ /* Now suck it in, Chunk at a time, as fast as we can */
+ if(file.reopen(false))
+ return 1;
+ if (file.seek(0, SEEK_SET) == -1)
+ return io_error("lseek before read");
+ dur.reset();
+ globals.decrement_and_wait(FastRead);
+ if(!globals.quiet) fprintf(stderr, "Reading intelligently...");
+ globals.timer.start();
+ for(i = 0; i < num_chunks; i++)
+ { /* per block */
+ dur.start();
+ if ((words = file.read_block(PVOID(buf))) == -1)
+ return io_error("read(2)");
+ dur.stop();
+ if(exitNow)
+ return eCtrl_C;
+ } /* per block */
+ file.Close();
+ globals.timer.stop_and_record(FastRead);
+ globals.timer.add_latency(FastRead, dur.getMax());
+ if(!globals.quiet) fprintf(stderr, "done\n");
+
+ globals.timer.start();
+ if(file.seek_test(globals.timer.random_source, globals.quiet, *globals.syn))
+ return 1;
+
+ /*
+ * Now test random seeks; first, set up for communicating with children.
+ * The object of the game is to do "Seeks" lseek() calls as quickly
+ * as possible. So we'll farm them out among SeekProcCount processes.
+ * We'll control them by writing 1-byte tickets down a pipe which
+ * the children all read. We write "Seeks" bytes with val 1, whichever
+ * child happens to get them does it and the right number of seeks get
+ * done.
+ * The idea is that since the write() of the tickets is probably
+ * atomic, the parent process likely won't get scheduled while the
+ * children are seeking away. If you draw a picture of the likely
+ * timelines for three children, it seems likely that the seeks will
+ * overlap very nicely with the process scheduling with the effect
+ * that there will *always* be a seek() outstanding on the file.
+ * Question: should the file be opened *before* the fork, so that
+ * all the children are lseeking on the same underlying file object?
+ */
+ }
+ return eNoErr;
+}
+
+int
+TestDirOps(int directory_size, int max_size, int min_size
+ , int num_directories, CGlobalItems &globals)
+{
+ COpenTest open_test(globals.file_chunk_size(), globals.bufSync, globals.doExit);
+ if(!directory_size)
+ {
+ return 0;
+ }
+ // if directory_size (in K) * data per file*2 > (ram << 10) (IE memory /1024)
+ // then the storage of file names will take more than half RAM and there
+ // won't be enough RAM to have Bonnie++ paged in and to have a reasonable
+ // meta-data cache.
+ if(globals.ram && directory_size * MaxDataPerFile * 2 > (globals.ram << 10))
+ {
+ fprintf(stderr
+ , "When testing %dK of files in %d MiB of RAM the system is likely to\n"
+ "start paging Bonnie++ data and the test will give suspect\n"
+ "results, use less files or install more RAM for this test.\n"
+ , directory_size, globals.ram);
+ return eParam;
+ }
+ // Can't use more than 1G of RAM
+ if(directory_size * MaxDataPerFile > (1 << 20))
+ {
+ fprintf(stderr, "Not enough ram to test with %dK files.\n"
+ , directory_size);
+ return eParam;
+ }
+ globals.decrement_and_wait(CreateSeq);
+ if(!globals.quiet) fprintf(stderr, "Create files in sequential order...");
+ if(open_test.create(globals.name, globals.timer, directory_size
+ , max_size, min_size, num_directories, false))
+ return 1;
+ globals.decrement_and_wait(StatSeq);
+ if(!globals.quiet) fprintf(stderr, "done.\nStat files in sequential order...");
+ if(open_test.stat_sequential(globals.timer))
+ return 1;
+ globals.decrement_and_wait(DelSeq);
+ if(!globals.quiet) fprintf(stderr, "done.\nDelete files in sequential order...");
+ if(open_test.delete_sequential(globals.timer))
+ return 1;
+ if(!globals.quiet) fprintf(stderr, "done.\n");
+
+ globals.decrement_and_wait(CreateRand);
+ if(!globals.quiet) fprintf(stderr, "Create files in random order...");
+ if(open_test.create(globals.name, globals.timer, directory_size
+ , max_size, min_size, num_directories, true))
+ return 1;
+ globals.decrement_and_wait(StatRand);
+ if(!globals.quiet) fprintf(stderr, "done.\nStat files in random order...");
+ if(open_test.stat_random(globals.timer))
+ return 1;
+ globals.decrement_and_wait(DelRand);
+ if(!globals.quiet) fprintf(stderr, "done.\nDelete files in random order...");
+ if(open_test.delete_random(globals.timer))
+ return 1;
+ if(!globals.quiet) fprintf(stderr, "done.\n");
+ return eNoErr;
+}
+
+void
+usage()
+{
+ fprintf(stderr, "usage:\n"
+ "bonnie++ [-d scratch-dir] [-c concurrency] [-s size(MiB)[:chunk-size(b)]]\n"
+ " [-n number-to-stat[:max-size[:min-size][:num-directories[:chunk-size]]]]\n"
+ " [-m machine-name] [-r ram-size-in-MiB]\n"
+ " [-x number-of-tests] [-u uid-to-use:gid-to-use] [-g gid-to-use]\n"
+ " [-q] [-f] [-b] [-p processes | -y] [-z seed | -Z random-file]\n"
+#ifdef O_DIRECT
+ " [-D]\n"
+#endif
+ "\nVersion: " BON_VERSION "\n");
+ exit(eParam);
+}
+
+int
+io_error(CPCCHAR message, bool do_exit)
+{
+ char buf[1024];
+
+ if(!already_printed_error && !do_exit)
+ {
+ sprintf(buf, "Bonnie: drastic I/O error (%s)", message);
+ perror(buf);
+ already_printed_error = 1;
+ }
+ if(do_exit)
+ exit(1);
+ return(1);
+}
+
diff --git a/bonnie++.spec b/bonnie++.spec
new file mode 100644
index 0000000..8009687
--- /dev/null
+++ b/bonnie++.spec
@@ -0,0 +1,48 @@
+Summary: A program for benchmarking hard drives and filesystems
+Name: bonnie++
+Version: 1.97
+Release: 1
+Copyright: GPL
+Group: Utilities/Benchmarking
+URL: http://www.coker.com.au/bonnie++
+Source: http://www.coker.com.au/bonnie++/experimental/bonnie++-%{version}.tgz
+BuildRoot: /tmp/%{name}-buildroot
+Prefixes: %{_prefix} %{_datadir}
+Requires: glibc >= 2.1
+Requires: perl >= 5.0
+BuildRequires: glibc-devel >= 2.1
+
+%description
+Bonnie++ is a benchmark suite that is aimed at performing a number of simple
+tests of hard drive and file system performance.
+
+%prep
+%setup -q
+
+%build
+./configure --prefix=$RPM_BUILD_ROOT%{_prefix} --mandir=3D$RPM_BUILD_ROOT%{_mandir}
+make
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make install
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root)
+%doc changelog.txt readme.html
+
+%{_prefix}/sbin/bonnie++
+%{_prefix}/sbin/zcav
+%{_prefix}/bin/bon_csv2html
+%{_prefix}/bin/bon_csv2txt
+%{_mandir}/man1/bon_csv2html.1
+%{_mandir}/man1/bon_csv2txt.1
+%{_mandir}/man8/bonnie++.8
+%{_mandir}/man8/zcav.8
+
+%changelog
+* Wed Sep 06 2000 Rob Latham <rlatham@plogic.com>
+- first packaging
diff --git a/bonnie++.spec.in b/bonnie++.spec.in
new file mode 100644
index 0000000..8261de0
--- /dev/null
+++ b/bonnie++.spec.in
@@ -0,0 +1,48 @@
+Summary: A program for benchmarking hard drives and filesystems
+Name: bonnie++
+Version: @version@
+Release: 1
+Copyright: GPL
+Group: Utilities/Benchmarking
+URL: http://www.coker.com.au/bonnie++
+Source: http://www.coker.com.au/bonnie++/experimental/bonnie++-%{version}.tgz
+BuildRoot: /tmp/%{name}-buildroot
+Prefixes: %{_prefix} %{_datadir}
+Requires: glibc >= 2.1
+Requires: perl >= 5.0
+BuildRequires: glibc-devel >= 2.1
+
+%description
+Bonnie++ is a benchmark suite that is aimed at performing a number of simple
+tests of hard drive and file system performance.
+
+%prep
+%setup -q
+
+%build
+./configure --prefix=$RPM_BUILD_ROOT%{_prefix} --mandir=3D$RPM_BUILD_ROOT%{_mandir}
+make
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make install
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root)
+%doc changelog.txt readme.html
+
+%{_prefix}/sbin/bonnie++
+%{_prefix}/sbin/zcav
+%{_prefix}/bin/bon_csv2html
+%{_prefix}/bin/bon_csv2txt
+%{_mandir}/man1/bon_csv2html.1
+%{_mandir}/man1/bon_csv2txt.1
+%{_mandir}/man8/bonnie++.8
+%{_mandir}/man8/zcav.8
+
+%changelog
+* Wed Sep 06 2000 Rob Latham <rlatham@plogic.com>
+- first packaging
diff --git a/bonnie.h b/bonnie.h
new file mode 100644
index 0000000..f64db7d
--- /dev/null
+++ b/bonnie.h
@@ -0,0 +1,69 @@
+#ifndef BONNIE
+#define BONNIE
+
+using namespace std;
+
+#define BON_VERSION "1.97"
+#define CSV_VERSION "1.97"
+
+#include "port.h"
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+typedef FILE *PFILE;
+
+#define SemKey 4711
+#define NumSems TestCount
+// 9 billion files (10) + up to 12 random extra chars
+#define RandExtraLen (12)
+#define MaxNameLen (10 + RandExtraLen)
+// data includes index to which directory (6 bytes) and terminating '\0' for
+// the name and pointer to file name
+#define MaxDataPerFile (MaxNameLen + 6 + 1 + sizeof(void *))
+#define MinTime (0.5)
+#define Seeks (8192)
+#define UpdateSeek (10)
+#define SeekProcCount (5)
+#define DefaultChunkBits (13)
+#define DefaultChunkSize (1 << DefaultChunkBits)
+#define UnitBits (20)
+#define Unit (1 << UnitBits)
+#define CreateNameLen 6
+#define DefaultFileSize 300
+#define DirectoryUnit 1024
+#define DefaultDirectorySize 16
+#define DefaultDirectoryMaxSize 0
+#define DefaultDirectoryMinSize 0
+#define DefaultByteIO 3
+
+enum tests_t
+{
+ ByteWrite = 0,
+ FastWrite,
+ ReWrite,
+ ByteRead,
+ FastRead,
+ Lseek,
+ CreateSeq,
+ StatSeq,
+ DelSeq,
+ CreateRand,
+ StatRand,
+ DelRand,
+ TestCount
+};
+
+int io_error(CPCCHAR message, bool do_exit = false);
+int bon_setugid(CPCCHAR user, CPCCHAR group, bool quiet);
+unsigned int size_from_str(CPCCHAR str, CPCCHAR conv);
+
+typedef enum
+{
+ eNoErr = 0,
+ eParam = 1,
+ eCtrl_C = 5
+} ERROR_TYPE;
+
+#endif
diff --git a/bonnie.h.in b/bonnie.h.in
new file mode 100644
index 0000000..fb67fe3
--- /dev/null
+++ b/bonnie.h.in
@@ -0,0 +1,69 @@
+#ifndef BONNIE
+#define BONNIE
+
+using namespace std;
+
+#define BON_VERSION "@version@"
+#define CSV_VERSION "@csv_version@"
+
+#include "port.h"
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+typedef FILE *PFILE;
+
+#define SemKey 4711
+#define NumSems TestCount
+// 9 billion files (10) + up to 12 random extra chars
+#define RandExtraLen (12)
+#define MaxNameLen (10 + RandExtraLen)
+// data includes index to which directory (6 bytes) and terminating '\0' for
+// the name and pointer to file name
+#define MaxDataPerFile (MaxNameLen + 6 + 1 + sizeof(void *))
+#define MinTime (0.5)
+#define Seeks (8192)
+#define UpdateSeek (10)
+#define SeekProcCount (5)
+#define DefaultChunkBits (13)
+#define DefaultChunkSize (1 << DefaultChunkBits)
+#define UnitBits (20)
+#define Unit (1 << UnitBits)
+#define CreateNameLen 6
+#define DefaultFileSize 300
+#define DirectoryUnit 1024
+#define DefaultDirectorySize 16
+#define DefaultDirectoryMaxSize 0
+#define DefaultDirectoryMinSize 0
+#define DefaultByteIO 3
+
+enum tests_t
+{
+ ByteWrite = 0,
+ FastWrite,
+ ReWrite,
+ ByteRead,
+ FastRead,
+ Lseek,
+ CreateSeq,
+ StatSeq,
+ DelSeq,
+ CreateRand,
+ StatRand,
+ DelRand,
+ TestCount
+};
+
+int io_error(CPCCHAR message, bool do_exit = false);
+int bon_setugid(CPCCHAR user, CPCCHAR group, bool quiet);
+unsigned int size_from_str(CPCCHAR str, CPCCHAR conv);
+
+typedef enum
+{
+ eNoErr = 0,
+ eParam = 1,
+ eCtrl_C = 5
+} ERROR_TYPE;
+
+#endif
diff --git a/changelog.txt b/changelog.txt
new file mode 120000
index 0000000..d526672
--- /dev/null
+++ b/changelog.txt
@@ -0,0 +1 @@
+debian/changelog \ No newline at end of file
diff --git a/conf.h b/conf.h
new file mode 100644
index 0000000..196c58b
--- /dev/null
+++ b/conf.h
@@ -0,0 +1,10 @@
+/* conf.h. Generated from conf.h.in by configure. */
+#ifndef CONF_H
+#define CONF_H
+
+/* #undef HAVE_ALGO_H */
+/* #undef HAVE_ALGO */
+#define HAVE_ALGORITHM 1
+/* #undef HAVE_MIN_MAX */
+
+#endif
diff --git a/conf.h.in b/conf.h.in
new file mode 100644
index 0000000..83b2b35
--- /dev/null
+++ b/conf.h.in
@@ -0,0 +1,9 @@
+#ifndef CONF_H
+#define CONF_H
+
+#undef HAVE_ALGO_H
+#undef HAVE_ALGO
+#undef HAVE_ALGORITHM
+#undef HAVE_MIN_MAX
+
+#endif
diff --git a/configure b/configure
new file mode 100755
index 0000000..4495abe
--- /dev/null
+++ b/configure
@@ -0,0 +1,5294 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.67.
+#
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
+# Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+ if test "x$CONFIG_SHELL" != x; then :
+ # We cannot yet assume a decent shell, so we have to provide a
+ # neutralization value for shells without unset; and this also
+ # works around shells that cannot unset nonexistent variables.
+ BASH_ENV=/dev/null
+ ENV=/dev/null
+ (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in #(
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME=
+PACKAGE_TARNAME=
+PACKAGE_VERSION=
+PACKAGE_STRING=
+PACKAGE_BUGREPORT=
+PACKAGE_URL=
+
+ac_unique_file="bonnie++.cpp"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+large_file
+thread_ldflags
+linux_pthread
+snprintf
+true_false
+bool
+semun
+EGREP
+GREP
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+CXXCPP
+ac_ct_CXX
+CXXFLAGS
+CXX
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+debug
+stripping
+csv_version
+version
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_stripping
+enable_debug
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CXX
+CXXFLAGS
+CCC
+CXXCPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used" >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures this package to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --disable-stripping disables stripping of installed binaries
+ --with-debug enables debug code generation for binaries
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ CXXCPP C++ preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+configure
+generated by GNU Autoconf 2.67
+
+Copyright (C) 2010 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_check_type LINENO TYPE VAR INCLUDES
+# ---------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_cxx_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval "test \"\${$3+set}\"" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_cxx_check_type
+
+# ac_fn_cxx_try_run LINENO
+# ------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_cxx_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_run
+
+# ac_fn_cxx_check_header_compile LINENO HEADER VAR INCLUDES
+# ---------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_cxx_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval "test \"\${$3+set}\"" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_cxx_check_header_compile
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+# (Autoconf-generated helper — edit configure.in, not this file.)
+# Logs the link command and its stderr to fd 5 (config.log); success also
+# requires a non-empty executable and, when not cross compiling, that the
+# result is executable ($as_test_x).  Status is returned via as_fn_set_status.
+ac_fn_cxx_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler. We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_cxx_try_link
+
+# ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES
+# ---------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+# "Mongrel" check: the header is both compiled (with INCLUDES) and run
+# through the preprocessor alone; if the two results disagree, warnings are
+# printed and the compiler's verdict wins.  A cached VAR short-circuits
+# everything.
+ac_fn_cxx_check_header_mongrel ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if eval "test \"\${$3+set}\"" = set; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval "test \"\${$3+set}\"" = set; then :
+  $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+  # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_header_compiler=yes
+else
+  ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+  ac_header_preproc=yes
+else
+  ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #((
+  yes:no: )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+    ;;
+  no:yes:* )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+    ;;
+esac
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval "test \"\${$3+set}\"" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_cxx_check_header_mongrel
+# ---- config.log bootstrap: create the log, open it as fd 5, and record ----
+# ---- platform details to help debug failed configure runs.            ----
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by $as_me, which was
+generated by GNU Autoconf 2.67. Invocation command line was
+
+  $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+# Every uname/arch probe below is best-effort; failures print "unknown".
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch              = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine           = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe          = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+# Also log each PATH component that will be searched for tools.
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    $as_echo "PATH: $as_dir"
+  done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+# The entire single-quoted argument below is the EXIT trap: it dumps cache
+# variables, output variables and confdefs.h into config.log, then removes
+# all conftest/confdefs temporary files.  Do not edit the string in place.
+trap 'exit_status=$?
+  # Save into config.log some information that might help in debugging.
+  {
+    echo
+
+    $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+    echo
+    # The following way of writing the cache mishandles newlines in values,
+(
+  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+  (set) 2>&1 |
+    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+   s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+      ;; #(
+    *)
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+)
+    echo
+
+    $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+    echo
+    for ac_var in $ac_subst_vars
+    do
+      eval ac_val=\$$ac_var
+      case $ac_val in
+      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+      esac
+      $as_echo "$ac_var='\''$ac_val'\''"
+    done | sort
+    echo
+
+    if test -n "$ac_subst_files"; then
+      $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+      echo
+      for ac_var in $ac_subst_files
+      do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+      done | sort
+      echo
+    fi
+
+    if test -s confdefs.h; then
+      $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+      echo
+      cat confdefs.h
+      echo
+    fi
+    test "$ac_signal" != 0 &&
+      $as_echo "$as_me: caught signal $ac_signal"
+    $as_echo "$as_me: exit $exit_status"
+  } >&5
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+    exit $exit_status
+' 0
+# Convert fatal signals (HUP INT PIPE TERM) into a clean exit so the EXIT
+# trap above still runs and config.log is completed.
+for ac_signal in 1 2 13 15; do
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+# Each heredoc below appends one #define; the values come from AC_INIT in
+# configure.in and are expanded at configure run time.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5 ; }
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+# (Precious variables are CC, CFLAGS, etc.; if any differ from the cached
+# run the cache is marked corrupted and configure aborts below, telling the
+# user to remove the cache.  Whitespace-only changes are tolerated.)
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+  eval ac_old_set=\$ac_cv_env_${ac_var}_set
+  eval ac_new_set=\$ac_env_${ac_var}_set
+  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+  eval ac_new_val=\$ac_env_${ac_var}_value
+  case $ac_old_set,$ac_new_set in
+    set,)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,set)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,);;
+    *)
+      if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+   { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+   ac_cache_corrupted=:
+ else
+   { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+   eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value:  \`$ac_old_val'" >&5
+$as_echo "$as_me:  former value:  \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me:  current value: \`$ac_new_val'" >&2;}
+      fi;;
+  esac
+  # Pass precious variables to config.status.
+  if test "$ac_new_set" = set; then
+    case $ac_new_val in
+    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *) ac_arg=$ac_var=$ac_new_val ;;
+    esac
+    case " $ac_configure_args " in
+      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
+      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+    esac
+  fi
+done
+if $ac_cache_corrupted; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+  as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+# Start out testing with the C compiler; the ac_* tool variables are
+# switched to C++ further below when C++ checks run.
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# conf.h is generated from conf.h.in by config.status at the end of the run.
+ac_config_headers="$ac_config_headers conf.h"
+
+
+# Package version strings substituted into the generated headers/tools.
+version="1.97"
+
+csv_version="1.97"
+
+# Check whether --enable-stripping was given.
+# BUG FIX: the generated code read $strippingval, a variable that is never
+# set anywhere in this script, so STRIPPING was always empty whenever the
+# option was given and --enable-stripping behaved like --disable-stripping.
+# Use autoconf's $enableval instead.  (The real fix belongs in
+# configure.in's AC_ARG_ENABLE(stripping, ...) — this file is generated.)
+if test "${enable_stripping+set}" = set; then :
+  enableval=$enable_stripping; STRIPPING=$enableval
+else
+  STRIPPING=yes
+fi
+
+
+# $stripping is handed to install(1): "-s" strips binaries, "" leaves them.
+# Default (no option given) is to strip, preserving historical behaviour.
+# NOTE(review): as far as this file shows only $stripping is consumed
+# downstream; confirm STRIPPING itself is not AC_SUBSTed elsewhere.
+if [ "$STRIPPING" = "no" ]; then
+  stripping=""
+else
+  stripping="-s"
+fi
+# Honour Debian's DEB_BUILD_OPTIONS=nostrip regardless of the option above.
+echo $DEB_BUILD_OPTIONS | grep -q nostrip
+if [ "$?" = "0" ]; then
+  stripping=""
+fi
+
+
+# Check whether --enable-debug was given.
+# BUG FIX: the original set debug=-g whenever the option appeared on the
+# command line, so --disable-debug also enabled -g.  Test the value instead;
+# the default (option absent) remains no debug flags.
+if test "${enable_debug+set}" = set; then :
+  enableval=$enable_debug
+  if test "$enableval" = "no"; then
+    debug=""
+  else
+    debug=-g
+  fi
+else
+  debug=""
+fi
+
+# DEB_BUILD_OPTIONS=debug forces -g on.
+echo $DEB_BUILD_OPTIONS | grep -q debug
+if [ "$?" = "0" ]; then
+  debug=-g
+fi
+
+# Tool variables for C++ checks (used by the ac_fn_cxx_* helpers above)...
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+# ...then immediately switch back to C for the compiler detection below.
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+# --- Locate the C compiler.  Search order: ${ac_tool_prefix}gcc, gcc,
+# --- ${ac_tool_prefix}cc, cc (rejecting /usr/ucb/cc), then cl.exe.
+# --- Results are cached in ac_cv_prog_CC / ac_cv_prog_ac_ct_CC.
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="${ac_tool_prefix}gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+# Fall back to an unprefixed gcc (warns later if cross compiling).
+if test -z "$ac_cv_prog_CC"; then
+  ac_ct_CC=$CC
+  # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_ct_CC="gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+else
+  CC="$ac_cv_prog_CC"
+fi
+
+# Next candidates: ${ac_tool_prefix}cc, then plain cc (skipping the broken
+# /usr/ucb/cc on old SunOS systems).
+if test -z "$CC"; then
+          if test -n "$ac_tool_prefix"; then
+    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="${ac_tool_prefix}cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  fi
+fi
+if test -z "$CC"; then
+  # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+  ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+       ac_prog_rejected=yes
+       continue
+     fi
+    ac_cv_prog_CC="cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+  # We found a bogon in the path, so make sure we never use it.
+  set dummy $ac_cv_prog_CC
+  shift
+  if test $# != 0; then
+    # We chose a different compiler from the bogus one.
+    # However, it has the same basename, so the bogon will be chosen
+    # first if we set CC to just the basename; use the full file name.
+    shift
+    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+  fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+# Last resort: cl.exe (prefixed, then unprefixed) for MS toolchains.
+if test -z "$CC"; then
+  if test -n "$ac_tool_prefix"; then
+  for ac_prog in cl.exe
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CC" && break
+  done
+fi
+if test -z "$CC"; then
+  ac_ct_CC=$CC
+  for ac_prog in cl.exe
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_ct_CC="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CC" && break
+done
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+fi
+
+fi
+
+
+# No usable compiler found anywhere in $PATH: abort with a pointer to the log.
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5 ; }
+
+# Provide some information about the compiler.
+# Try the common version flags; whichever ones the compiler rejects simply
+# log an error to config.log (only the first 10 lines of stderr are kept).
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+# Compile-and-link a trivial program to prove the compiler can create
+# executables at all, and deduce the executable suffix (EXEEXT).
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+  esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link_default") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile.  We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+    [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+    *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+    ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+    * )
+ break;;
+  esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+  ac_file=''
+fi
+if test -z "$ac_file"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5 ; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Re-link with -o to confirm the executable suffix found above.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+   break;;
+    * ) break;;
+  esac
+done
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5 ; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+# Build and try to RUN a program that writes conftest.out: if it runs we are
+# not cross compiling.  Afterwards, determine the object-file suffix OBJEXT.
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+    else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5 ; }
+    fi
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+# Compile (not link) an empty program and inspect what file appeared to
+# learn the object suffix (.o vs .obj); cached in ac_cv_objext.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if test "${ac_cv_objext+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  for ac_file in conftest.o conftest.obj conftest.*; do
+  test -f "$ac_file" || continue;
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+       break;;
+  esac
+done
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5 ; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CXX+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if test "${ac_cv_prog_cxx_g+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+else
+ CXXFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+ if test "${ac_cv_prog_CXXCPP+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CXXCPP needs to be expanded
+ for CXXCPP in "$CXX -E" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+ CXXCPP=$ac_cv_prog_CXXCPP
+else
+ ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5 ; }
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+ ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
+$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
+if test "${ac_cv_c_const+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+/* FIXME: Include the comments suggested by Paul. */
+#ifndef __cplusplus
+ /* Ultrix mips cc rejects this. */
+ typedef int charset[2];
+ const charset cs;
+ /* SunOS 4.1.1 cc rejects this. */
+ char const *const *pcpcc;
+ char **ppc;
+ /* NEC SVR4.0.2 mips cc rejects this. */
+ struct point {int x, y;};
+ static struct point const zero = {0,0};
+ /* AIX XL C 1.02.0.0 rejects this.
+ It does not let you subtract one const X* pointer from another in
+ an arm of an if-expression whose if-part is not a constant
+ expression */
+ const char *g = "string";
+ pcpcc = &g + (g ? g-g : 0);
+ /* HPUX 7.0 cc rejects these. */
+ ++pcpcc;
+ ppc = (char**) pcpcc;
+ pcpcc = (char const *const *) ppc;
+ { /* SCO 3.2v4 cc rejects this. */
+ char *t;
+ char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+ *t++ = 0;
+ if (s) return 0;
+ }
+ { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */
+ int x[] = {25, 17};
+ const int *foo = &x[0];
+ ++foo;
+ }
+ { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+ typedef const int *iptr;
+ iptr p = 0;
+ ++p;
+ }
+ { /* AIX XL C 1.02.0.0 rejects this saying
+ "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+ struct s { int j; const int *ap[3]; };
+ struct s *b; b->j = 5;
+ }
+ { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+ const int foo = 10;
+ if (!foo) return 0;
+ }
+ return !cs[0] && !zero.x;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_c_const=yes
+else
+ ac_cv_c_const=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
+$as_echo "$ac_cv_c_const" >&6; }
+if test $ac_cv_c_const = no; then
+
+$as_echo "#define const /**/" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_cxx_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+ac_fn_cxx_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = x""yes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
+
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/sem.h>
+
+int
+main ()
+{
+union semun sem;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ semun="yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if [ -n "$semun" ]; then
+ semun="#define SEMUN_IN_SEM_H"
+fi
+
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+bool var;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ bool="typedef bool char;"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+char c = true; char d = false;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ true_false="0"
+else
+ true_false="1"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+char buf[10]; snprintf(buf, sizeof(buf), "abc");
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+
+else
+ snprintf="no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext;
+if [ -n "$snprintf" ]; then
+ snprintf="#define NO_SNPRINTF"
+fi
+
+for ac_header in algorithm algo.h algo
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_cxx_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#define _GNU_SOURCE
+#include <pthread.h>
+
+int
+main ()
+{
+pthread_mutexattr_t attr;
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ linux_pthread="yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if [ -n "$linux_pthread" ]; then
+ linux_pthread="#define LINUX_PTHREAD"
+fi
+
+LDFLAGS=-lpthread
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <pthread.h>
+void * thread_func(void * param) { return NULL; }
+
+int
+main ()
+{
+pthread_t thread_info;
+ pthread_attr_t attr;
+ pthread_create(&thread_info, &attr, &thread_func, NULL);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ thread_ldflags="-lpthread"
+
+else
+ thread_ldflags="-pthread"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+
+if test "$cross_compiling" = yes; then :
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run test program while cross compiling
+See \`config.log' for more details" "$LINENO" 5 ; }
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifndef _LARGEFILE64_SOURCE
+#define _LARGEFILE64_SOURCE
+#endif
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+int main () {
+ int fd;
+ off64_t i = off64_t(1) << 32 + 1;
+ const char * const name = "test.2g";
+
+ fd = open64(name, O_CREAT|O_RDWR|O_EXCL|O_LARGEFILE,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);
+ if (fd == -1) {
+ perror("open64");
+ exit(1);
+ }
+ unlink(name);
+
+ int r = lseek64(fd, i, SEEK_SET);
+ if (r == -1) {
+ perror("lseek");
+ exit(1);
+ }
+ r = write(fd, &r, 1);
+ if (r == -1) {
+ perror("write");
+ exit(1);
+ }
+ close(fd);
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+ large_file="yes"
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+if [ -n "$large_file" ]; then
+ large_file="#define _LARGEFILE64_SOURCE"
+fi
+
+ac_config_files="$ac_config_files Makefile bonnie.h port.h bonnie++.spec bon_csv2txt bon_csv2html.1 sun/pkginfo"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in #(
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.67. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to the package provider."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.67,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2010 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "conf.h") CONFIG_HEADERS="$CONFIG_HEADERS conf.h" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "bonnie.h") CONFIG_FILES="$CONFIG_FILES bonnie.h" ;;
+ "port.h") CONFIG_FILES="$CONFIG_FILES port.h" ;;
+ "bonnie++.spec") CONFIG_FILES="$CONFIG_FILES bonnie++.spec" ;;
+ "bon_csv2txt") CONFIG_FILES="$CONFIG_FILES bon_csv2txt" ;;
+ "bon_csv2html.1") CONFIG_FILES="$CONFIG_FILES bon_csv2html.1" ;;
+ "sun/pkginfo") CONFIG_FILES="$CONFIG_FILES sun/pkginfo" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5 ;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_t=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_t"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS "
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5 ;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5 ;;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out" && rm -f "$tmp/out";;
+ *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
+ } >"$tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+ ;;
+
+
+ esac
+
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/configure.in b/configure.in
new file mode 100644
index 0000000..f92f0ab
--- /dev/null
+++ b/configure.in
@@ -0,0 +1,126 @@
+dnl Process this file with autoconf to produce a configure script.
+AC_INIT(bonnie++.cpp)
+AC_CONFIG_HEADER(conf.h)
+AC_SUBST(version)
+version="1.97"
+AC_SUBST(csv_version)
+csv_version="1.97"
+
+AC_ARG_ENABLE(stripping,
+ [ --disable-stripping disables stripping of installed binaries],
+ STRIPPING=$strippingval, STRIPPING=no)
+AC_SUBST(stripping)
+if [[ ! "$STRIPPING" = "no" ]]; then
+ stripping=""
+else
+ stripping="-s"
+fi
+echo $DEB_BUILD_OPTIONS | grep -q nostrip
+if [[ "$?" = "0" ]]; then
+ stripping=""
+fi
+
+AC_SUBST(debug)
+AC_ARG_ENABLE(debug,
+ [ --with-debug enables debug code generation for binaries],
+ debug=-g, debug="")
+echo $DEB_BUILD_OPTIONS | grep -q debug
+if [[ "$?" = "0" ]]; then
+ debug=-g
+fi
+
+dnl Checks for programs.
+AC_LANG_CPLUSPLUS
+AC_PROG_CC
+AC_PROG_CXX
+AC_PROG_CXXCPP
+AC_PROG_INSTALL
+
+dnl Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_SIZE_T
+
+AC_SUBST(semun)
+AC_TRY_COMPILE(#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/sem.h>
+ , union semun sem; , semun="yes")
+if [[ -n "$semun" ]]; then
+ semun="#define SEMUN_IN_SEM_H"
+fi
+
+AC_SUBST(bool)
+AC_TRY_COMPILE([], [bool var;] , , bool="typedef bool char;")
+AC_SUBST(true_false)
+AC_TRY_COMPILE(, [char c = true; char d = false;
+] , true_false="0", true_false="1")
+AC_SUBST(snprintf)
+AC_TRY_LINK([#include <stdio.h>], char buf[[10]]; snprintf(buf, sizeof(buf), "abc");,,snprintf="no");
+if [[ -n "$snprintf" ]]; then
+ snprintf="#define NO_SNPRINTF"
+fi
+
+AC_CHECK_HEADERS(algorithm algo.h algo)
+
+AC_SUBST(linux_pthread)
+AC_TRY_COMPILE([#define _GNU_SOURCE
+#include <pthread.h>
+] , [pthread_mutexattr_t attr;
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);]
+ , linux_pthread="yes")
+if [[ -n "$linux_pthread" ]]; then
+ linux_pthread="#define LINUX_PTHREAD"
+fi
+
+LDFLAGS=-lpthread
+AC_SUBST(thread_ldflags)
+AC_TRY_LINK([#include <pthread.h>
+void * thread_func(void * param) { return NULL; }
+] , [pthread_t thread_info;
+ pthread_attr_t attr;
+ pthread_create(&thread_info, &attr, &thread_func, NULL);]
+ , thread_ldflags="-lpthread"
+ , thread_ldflags="-pthread")
+
+AC_SUBST(large_file)
+AC_TRY_RUN([#ifndef _LARGEFILE64_SOURCE
+#define _LARGEFILE64_SOURCE
+#endif
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+int main () {
+ int fd;
+ off64_t i = off64_t(1) << 32 + 1;
+ const char * const name = "test.2g";
+
+ fd = open64(name, O_CREAT|O_RDWR|O_EXCL|O_LARGEFILE,
+ S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);
+ if (fd == -1) {
+ perror("open64");
+ exit(1);
+ }
+ unlink(name);
+
+ int r = lseek64(fd, i, SEEK_SET);
+ if (r == -1) {
+ perror("lseek");
+ exit(1);
+ }
+ r = write(fd, &r, 1);
+ if (r == -1) {
+ perror("write");
+ exit(1);
+ }
+ close(fd);
+ return 0;
+}], large_file="yes")
+if [[ -n "$large_file" ]]; then
+ large_file="#define _LARGEFILE64_SOURCE"
+fi
+
+AC_OUTPUT(Makefile bonnie.h port.h bonnie++.spec bon_csv2txt bon_csv2html.1 sun/pkginfo)
diff --git a/copyright.txt b/copyright.txt
new file mode 100644
index 0000000..251b6d3
--- /dev/null
+++ b/copyright.txt
@@ -0,0 +1,19 @@
+Copyright:
+
+ * COPYRIGHT NOTICE:
+ * Copyright (c) Tim Bray <tbray@textuality.com>, 1990.
+ * Copyright (c) Russell Coker <russell@coker.com.au>, 1999.
+
+
+Licensed under the GPL version 2 license.
+
+ * DISCLAIMER:
+ * This program is provided AS IS with no warranty of any kind, and
+ * The author makes no representation with respect to the adequacy of this
+ * program for any particular purpose or with respect to its adequacy to
+ * produce any particular result, and
+ * The author shall not be liable for loss or damage arising out of
+ * the use of this program regardless of how sustained, and
+ * In no event shall the author be liable for special, direct, indirect
+ * or consequential damage, loss, costs or fees or expenses of any
+ * nature or kind.
diff --git a/credits.txt b/credits.txt
new file mode 100644
index 0000000..59978c9
--- /dev/null
+++ b/credits.txt
@@ -0,0 +1,10 @@
+Tim Bray <tbray@textuality.com> Author of the original Bonnie program.
+
+Christian Kagerhuber <c.kagerhuber@t-online.net> Contributed some patches
+including the semaphore code. Also found the srand() bug in the seek code!
+
+Brian A. May <bam@snoopy.apana.org.au> Sponsored the Debianization of this
+and other programs I am writing.
+
+Brad Knowles <blk@skynet.be> Did heaps of testing, especially on BSD, also
+contributed lots of little patches to the source.
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..2911616
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,655 @@
+bonnie++ (1.97.1) unstable; urgency=medium
+
+ * Fixed the changelog and aim it at unstable not wheezy.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 23 Nov 2012 20:12:46 +1100
+
+bonnie++ (1.97) unstable; urgency=medium
+
+ * Fix a CSV column alignment bug in the case where -s0 is used. Bump the
+ data format version to 1.97. Thanks to Brian Keefer for reporting this.
+ Also added the concurrency and seed fields to the header.
+ Closes: #660564, #669124
+ * Use a const pointer for the large file test.
+ * zcav.8 documents the maximum value of -s
+  * Made the zcav default block size 512M to cope with new disks that can
+ sustain >200MB/s. Also documented this.
+ * Made zcav not SEGV when the -c option is used.
+ Closes: #687668
+ * Describe the units for the plain text output in bonnie++.8.
+ Closes: #643821
+ * Removed the outdated section in the description which referred to Lenny.
+ Closes: #693483
+ * Fixed most of the HTML validation errors in the output of bon_csv2html.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 23 Nov 2012 19:32:00 +1100
+
+bonnie++ (1.96) unstable; urgency=low
+
+ * Fix a minor bug in setting the GID.
+ * Make it build on Solaris by checking for O_DIRECT being defined.
+ * The option -n9765 or greater caused the small file test to abort,
+ to increase this I changed the number format to hexadecimal and made it
+ 10 digits. Now any option smaller than -n1073741824 will be OK.
+ Thanks Nick Dokos of HP.
+ * Updated bon_csv2html, made the machine name span two rows, made the
+ concurrency column go away when there's no data, correctly calculated the
+ colors of the speed columns (was off by one column before), and calculate
+ the CPU colors.
+
+ -- Russell Coker <russell@coker.com.au> Sat, 04 Jul 2009 11:24:13 +1000
+
+bonnie++ (1.95) unstable; urgency=low
+
+ * Removed code for building on OS/2, I think that no-one has been interested
+ in this for a while.
+ * Removed code for building on NT, I don't think that anyone was ever very
+ interested in that and it probably didn't work.
+ * Patch from Justin Clift to make getc_putc compile on RHEL 5.2.
+ * Added the -D option to bonnie++ to use direct IO (O_DIRECT) for bulk IO.
+ Thanks to Dave Murch from Violin Memory - http://violin-memory.com/
+ * Fixed some more warnings with recent versions of GCC.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 31 Dec 2008 08:50:06 +1100
+
+bonnie++ (1.94) unstable; urgency=low
+
+ * Fixed typos in man page.
+ Closes: #133714, #449596
+ * Added the URL to the man pages.
+ * Documented why UID==0 is bad.
+ Closes: #182023
+ * Check return value of chdir()
+ * Removed the zcav -s option which did the same thing as -n.
+ * Made zcav report in GiB and MiB/s, and made the default 256MiB.
+ * Improved bonnie++.8 documentation of the -y and -p options.
+ * Made zcav support skipping blocks for faster completion.
+ * Fixed the autoconf checks for 64bit seek.
+
+ -- Russell Coker <russell@coker.com.au> Tue, 19 Aug 2008 18:36:08 +1000
+
+bonnie++ (1.93d) unstable; urgency=low
+
+ * Documented the default chunk size in bonnie++.8.
+
+ * Added support for testing write speed to zcav.
+
+ * Made default block size for zcav 200M.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 3 Dec 2007 10:18:00 +1100
+
+bonnie++ (1.93c) unstable; urgency=low
+
+ * Made it compile with GCC 3.2.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 8 Jan 2003 23:13:00 +0100
+
+bonnie++ (1.93b) unstable; urgency=low
+
+ * Fixed an error in calculating seeks, it used to add an extra 5 seconds to
+ the test time before calculation and therefore under-reported the results.
+
+ * Fixed the signal handling error on Irix.
+
+ * Fixed <algo> check to work with GCC 3.2.
+
+ * Fixed a bug where it would SEGV if you specified more than 999 directories
+ (now it handles 99,999 and displays an error message if you ask for more).
+
+ * Fixed a bug in error handling during a stat test with multiple directories.
+
+ * Fixed the mandir for RPM package building.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 1 Jan 2003 18:02:00 +0100
+
+bonnie++ (1.93a) unstable; urgency=low
+
+ * Made the signal handler extern "C".
+
+ * Updated the RPM packaging.
+
+ * Improved the NT portability code.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 1 Mar 2002 17:28:00 +0100
+
+bonnie++ (1.93) unstable; urgency=low
+
+ * Updated to latest debian standards version and follow the latest Debian
+ policy on stripping and debug generation.
+
+ * Removed optind=0 because it caused problems on BSD.
+
+ * Added new test program getc_putc for per char IO.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 9 Dec 2001 15:52:00 +0100
+
+bonnie++ (1.92b) unstable; urgency=low
+
+ * Fixed a buffer overflow in the random number management code.
+
+ * Fixed a bug where bonnie++ would SEGV if ^C was pressed at the start of the
+ file creation tests.
+
+ * Clarified the -r option to bonnie++ in the man page.
+
+ * Finished adding the support for specifying the seed. Fixed the bon_csv2*
+ programs and their documentation.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 7 Jun 2001 14:03:54 +0200
+
+bonnie++ (1.92a) unstable; urgency=low
+
+ * Made the -d option to bonnie++ work with relative paths. Now it does
+    chdir() to the specified directory and proceeds as usual.
+
+ * Fixed a tricky bug related to thread management which caused SEGV. For
+    some time I had known of the existence of the bug but I couldn't work
+ out how to reproduce it.
+
+ * Added a feature of specifying the random number seed for repeatable tests.
+ Also made a separate class to manage all random numbers as part of this.
+
+ * Made bonnie++ have the facility to read from a file containing random
+ numbers and created a program to generate them.
+
+  * Added a "make dep" rule and include its output in the archive.
+
+ * Now I don't use stupid macro names like _close which break on BSD!
+
+ -- Russell Coker <russell@coker.com.au> Mon, 11 Jun 2001 13:39:36 +0200
+
+bonnie++ (1.92) unstable; urgency=low
+
+ * Made the per-byte code use read() and write() instead of putc() and getc().
+ Using the stream functions gives too much variance and isn't as interesting
+ nowadays. I will soon add a separate benchmark for streams vs write() for
+ byte at a time.
+
+ * Made it use a single file >2G on systems that have LFS support. On systems
+ that lack it there will be no more support of more than 2G of data.
+ I believe that now any machine with 1G of RAM should have LFS support!
+ Note that the ./configure test for this takes ages on ReiserFS.
+
+ * Made it remove the ./configure cache files before Debian package build.
+ This fixes the problems with incorrect data being cached for ports.
+
+ * Fixed the memory checks for file creation tests, thanks to
+ Andreas Dilger <adilger@turbolinux.com> .
+
+ * Fixed a minor field alignment bug in bon_csv2html.
+
+ * Made bon_csv2html not display columns that contain no data.
+
+ * Fixed a bug where bon_csv2html could crash on bad data.
+
+ * Fixed a bug where bonnie++ would generate bad CSV data if run with "-n0".
+
+ -- Russell Coker <russell@coker.com.au> Fri, 11 May 2001 16:34:58 +0100
+
+bonnie++ (1.91c) unstable; urgency=low
+
+ * Now compiles with GCC 3.0.
+
+ * Stopped it giving a SIGFPE on Alpha and tidied up the relevant code in the
+ results printing.
+
+ * Fixed the CSV output.
+
+ * Fixed a minor warning.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 16 Apr 2001 18:49:03 +0100
+
+bonnie++ (1.91b) unstable; urgency=low
+
+ * Added an fflush(NULL) to the Sync code to make it display the prompt on
+ buffered devices.
+
+ * Added autoconf check for snprintf().
+
+ * Fixed some minor warnings.
+
+ -- Russell Coker <russell@coker.com.au> Tue, 6 Mar 2001 20:17:17 +0100
+
+bonnie++ (1.91a) unstable; urgency=low
+
+ * Fixed the -p/-y options.
+
+ * Made -n take "k" suffix for kilobytes for the size and the "m" suffix
+ for creating millions of files. Also allow specifying the chunk size
+ for small file tests.
+
+ * Changed readme.html to make it more up to date.
+
+ * Made it conflict/provide/replace the bonnie package. Bonnie++ does
+ everything that bonnie does and fixes some bugs including doing srand()
+ after fork(), and having sensible defaults and detection of RAM installed
+ to avoid meaningless test results.
+ There is no need to have bonnie and bonnie++ installed.
+
+ * Use @exec_prefix@ in autoconf for better standards compliance.
+
+ * Made it possible to specify chunk_size for IO and file tests separately.
+ Added a new field in position 21 in the csv file to record this. So I
+ changed the compatibility number for the bon_csv2* programs.
+
+ * Removed the "wait" code in zcav which caused all the broken-ness. It
+ probably didn't gain us much anyway.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 25 Feb 2001 17:38:49 +0100
+
+bonnie++ (1.91) unstable; urgency=low
+
+ * Fixed the bon_csv2html bug that made the columns not line up if different
+ rows had different numbers of blank columns.
+
+ * Fixed a bug introduced in 1.90f which resulted in Random Seeks always
+ being reported as 0.
+
+ * Made quiet mode not tell you which UID it uses.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 11 Feb 2001 11:23:10 +0100
+
+bonnie++ (1.90g) unstable; urgency=low
+
+ * Added latency parsing support to bon_csv2html.
+
+ * Fixed a number of bugs in bon_csv2html, now does colors correctly.
+
+ -- Russell Coker <russell@coker.com.au> Tue, 6 Feb 2001 14:20:15 +0100
+
+bonnie++ (1.90f) unstable; urgency=low
+
+ * Now use mode "b" for fopen() for NT and OS/2.
+
+ * Made the latency numbers print out correctly.
+
+ * Added latency parsing support to bon_csv2txt.
+
+ -- Russell Coker <russell@coker.com.au> Tue, 30 Jan 2001 16:00:19 +0100
+
+bonnie++ (1.90e) unstable; urgency=low
+
+ * Now should compile properly on NT and OS/2 (NB ZCAV still won't fully work).
+
+ -- Russell Coker <russell@coker.com.au> Thu, 18 Jan 2001 11:08:37 +1100
+
+bonnie++ (1.90d) unstable; urgency=low
+
+ * Updated standards version to 3.2.1.
+
+ * Done some serious work on porting to OS/2 and NT. This may break compile
+ on some versions of UNIX. If so I'll fix it as soon as it gets reported.
+
+ -- Russell Coker <russell@coker.com.au> Thu, 11 Jan 2001 14:01:00 +1100
+
+bonnie++ (1.90c) unstable; urgency=low
+
+ * Rewrote bon_csv2html in C++ and made it assign colors to the fields to show
+ how fast or slow the values are (red for slow, green for fast).
+
+ -- Russell Coker <russell@coker.com.au> Mon, 11 Dec 2000 22:31:20 +0100
+
+bonnie++ (1.90b) unstable; urgency=low
+
+ * Added first stage of support for synchronising bonnie++ instances over a
+ network.
+ This required changing the way the -y option operates.
+ Also created a new Sync class to do this generically.
+
+ * Added code to check the latency of operations in micro-seconds (us).
+
+ -- Russell Coker <russell@coker.com.au> Tue, 5 Dec 2000 17:04:27 +0100
+
+bonnie++ (1.90a) unstable; urgency=low
+
+ * This is the start of the new 2.00 series of Bonnie++. The aim is to do
+ serious performance testing of RAID arrays. All programs will be multi-
+ threaded.
+
+ * Added the multi-threaded zcav that I had removed from the 1.00 series and
+ make Bonnie++ use threads instead of fork(). Next step is to add extra
+    concurrency through threads.
+
+ * Changed the format of the csv files. Added a format version number and a
+ place for the version of Bonnie++ that created the data. Made the
+ bon_csv2txt and bon_csv2html programs understand the new format.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 29 Nov 2000 23:53:13 +0100
+
+bonnie++ (1.00f) unstable; urgency=low
+
+ * Fixed the bugs in timing of seeks.
+
+ * Changed the number of seeks from 8000 to 8192.
+
+ * Now the minimum time for a test that will be considered valid is 500ms (for
+ bonnie++ and zcav), it was previously 1000ms but I have been convinced that
+ system clocks are accurate enough for this.
+
+ * Changed the default number of files created for file creation tests from
+ 30K to 16K, this change makes the test time bearable on obsolete file
+ systems like UFS but relies on the above change to work on ReiserFS on
+ Pentium4 class CPUs.
+
+ * Changed the default file size to 300M for IO tests, this reflects the
+ increase in RAM sizes over the last year.
+
+ * Added some more compile warnings and fixed some more trivial bugs.
+
+ * Made the loops: line in zcav output always be prefixed by a '#' for a
+ gnuplot comment.
+
+ * Made zcav correctly display the position in megabytes instead of block-size
+ units.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 27 Nov 2000 09:45:30 +0100
+
+bonnie++ (1.00e) unstable; urgency=low
+
+ * Now exit on sync errors.
+
+ * When directory can't be synced it will display one warning and not try to
+ sync again.
+
+ * Stopped it crashing when there is only one directory to be synced.
+
+ * Made the version number managed by autoconf so I don't release it with
+    inconsistent version numbers again!
+
+ -- Russell Coker <russell@coker.com.au> Thu, 9 Nov 2000 03:26:15 +0100
+
+bonnie++ (1.00d) unstable; urgency=low
+
+ * Use SA_RESETHAND instead of SA_ONESHOT for the benefit of Solaris.
+
+ * Added a specific rule for bon_suid.cpp as it doesn't have a header file.
+
+ * Added --prefix support to the ./configure , also made the default prefix
+ be /usr/local instead of /usr .
+
+ * Changed the autoconf support for checking for C++ slightly. It should
+ now work better for some strange setups and work the same for everyone
+ else (I hope).
+
+ * Made the autoconf tests for semaphore headers work correctly, now it
+ should compile on *BSD.
+
+ * Added --disable-stripping option for ./configure if you don't want binaries
+ stripped.
+
+ * Added autoconf checking for bool, now it should compile on AIX using ICC
+ without any special options!
+
+ * Reverted zcav to the 1.00a version and then added the code for -u, -g, and
+ the fix for large numbers of data points. The multi-threaded zcav code
+ will go into 1.90 (the pre-2.00 tree).
+ Bonnie++ versions < 1.90 will never again have threading code.
+
+ * Made bon_csv2txt use every available character for the description.
+
+ * Made it install man pages by default.
+
+ * Added sun package support - "make -C sun"!
+
+ -- Russell Coker <russell@coker.com.au> Thu, 28 Sep 2000 16:22:15 +0100
+
+bonnie++ (1.00c) unstable; urgency=low
+
+ * Closes:#53545
+ Fixed in 0.99e, should have been closed in Debian BTS.
+
+ * Closes:#53546
+ Fixed in 0.99e, should have been closed in Debian BTS.
+
+ * Closes:#61925
+ Fixed in 1.00a.
+
+ * Closes:#64995
+ It is in /usr/sbin because it can only sensibly be run by the administrator
+ of the machine, otherwise it probably will give bad results and may impact
+ the performance of the machine in question. Also it can now change
+ UID/GID. The new way of dealing with the running as root issue is in the
+ next change.
+
+ * Made zcav and bonnie++ take -u and -g options to set the UID and GID to run
+ as. For bonnie++ it is now mandatory to use the -u option when running as
+ root.
+
+  * Made bonnie++ not change its command-line.
+
+ * Documented the K and G suffixes for sizes on the command-line.
+
+ * Now the CPU time field also displays as "+++" if the elapsed time is < 1s.
+
+ * Fixed the machine-name broken-ness from 1.00b, and made the machine name
+ default to the nodename as reported by utsname() (also the output of
+ `hostname` or `uname -n`).
+
+ * Now uses sysconf() to check the ram size, you can use -r to over-ride it.
+ Also the default file size for IO tests will be twice the reported RAM
+ size or 200M (whichever is larger).
+
+ * Now Bonnie++ handles ^C from the keyboard, the XCPU and XFSZ (excessive CPU
+ and excessive file size) flags and aborts the program cleanly removing all
+ files when it receives them. Also ignores SIGHUP.
+
+ * Added AC_PROG_CC to configure.in so that it can compile on systems with
+ strange C compiler setups.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 08 Sep 2000 08:22:47 +0100
+
+bonnie++ (1.00b) unstable; urgency=low
+
+ * Added more warnings to the compile and removed assertions. Made some
+ trivial changes to the code (like changing variable names) to stop the
+ warnings.
+
+ * Fixed the memory management problem on bonnie++, these made it not work on
+ IA64 (and stopped it working correctly on most platforms). Thanks to
+ Electric Fence by Bruce Perens for the discovery of this.
+ The worst part of it was introduced in testing this version, so it only
+ hit me and my alpha-testers.
+
+ * Fixed zcav for large numbers of data points.
+
+ * Made zcav multi-threaded to test multiple hard drives at once. Changed the
+ way it works totally.
+
+ * Removed some dependencies on extensions to the C++ standard which are not
+ supported in all compilers, also removed some trivial header file issues.
+ These were found in testing on Tru64Unix.
+
+ * Fixed a bug in bonnie++, it would go into an infinite loop when the file
+ creation tests had a non-zero size.
+
+ * Made bonnie++ work for block-reads that return partial blocks, now it will
+ print an error and do another read for the rest.
+
+ * Made Bonnie++ accept machine names up to 4095 bytes and not crash if the
+ name is longer. Previously the limit was 20 bytes and it crashed when you
+ exceeded it.
+
+ * This version is fairly experimental but I'm releasing it now because I need
+ wider testing of the new features.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 25 Aug 2000 12:15:06 +0200
+
+bonnie++ (1.00a) unstable; urgency=low
+
+ * Added a 30 second startup delay when run as root. A user lost some data
+ because of running it as root, if they had run it as a regular account
+ they would be OK. I don't want this to happen again.
+
+ * Zcav now displays an error if it can't read a single block.
+
+ * Added some basic autoconf support which I will use to increase portability
+ in future versions.
+
+ * Now provides zcav.
+
+ * Should compile with all old versions of gcc.
+
+ * Fixed a warning on Alpha.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 24 Apr 2000 23:34:02 +0100
+
+bonnie++ (1.00) unstable; urgency=low
+
+ * Now include ZCAV in the same package. ZCAV package should disappear.
+
+ * License is now GPL. Tim Bray agrees to the GPL for his parts, the license
+ conflict was stopping me from putting ZCAV into the archive.
+
+ * ZCAV reads through a hard drive sequentially and reports the IO speeds for
+ different zones of the drive.
+
+ * Fixed a few minor issues with the documentation, and put the test programs
+ in /usr/sbin as they aren't generally run by regular users. Also use man
+ section 8.
+
+ -- Russell Coker <russell@coker.com.au> Sat, 01 Mar 2000 12:01:00 +0100
+
+bonnie++ (0.99j) unstable; urgency=low
+
+ * 0.99h core dumped when you didn't specify "-b" for file creation tests,
+ fixed.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 05 Mar 2000 11:16:42 +0100
+
+bonnie++ (0.99h) unstable; urgency=low
+
+ * Fixed a variety of bugs in the semaphore code which were introduced in
+ 0.99g.
+
+ * Fixed formatting of output.
+
+ * Added "-b" option to sync all writes.
+
+ * Changed the semaphore code to make it more easily hackable for BSD users,
+ it won't compile as-is on BSD at the moment...
+
+ -- Russell Coker <russell@coker.com.au> Sun, 05 Mar 2000 11:16:42 +0100
+
+bonnie++ (0.99g) unstable; urgency=low
+
+ * Now use getopt() for checking command-line options.
+
+ * Added new versions of fork and semaphore code, initially developed for
+ postal.
+
+ * Fixed the message that's displayed when bad command-line parameters are
+ entered.
+
+ * Version 1.[0-8]0 will use fork(). Version 1.90 and above will use POSIX
+ threads and include the concurrant bonnie++ functionality I've been
+ promising for so long.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 23 Feb 2000 22:16:23 +0100
+
+bonnie++ (0.99f) unstable; urgency=low
+
+ * Added "-f" parameter to skip per-char tests and semaphore code to
+ synchronise multiple instances of Bonnie++. Thanks to
+ Christian Kagerhuber <c.kagerhuber@t-online.net> for the patch!
+
+ * Added srand() after the fork so each child gets different random numbers.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 12 Jan 2000 16:45:28 +1100
+
+bonnie++ (0.99e) unstable; urgency=low
+
+ * Fixed the operation of "-x" parameter (used to just cause crashes).
+
+ * Made it cleanly exit under some error conditions where it used to crash.
+
+ * Improved the bonnie++ man page.
+
+ * Fixed some checking of command-line parameters.
+
+ * Merged code from the OS/2 port, needs lots of testing...
+
+ -- Russell Coker <russell@coker.com.au> Wed, 12 Jan 2000 16:45:28 +1100
+
+bonnie++ (0.99d) unstable; urgency=low
+
+ * Added some more functionality. Tests hard and soft link creation.
+
+ * Fixed CSV output of <100 seeks per second.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 21 Nov 1999 22:37:42 +0200
+
+bonnie++ (0.99c) unstable; urgency=low
+
+ * Fix some bugs with big IO (fseek related) and include man pages.
+
+ * Made it always print the CSV data.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 21 Nov 1999 22:37:42 +0200
+
+bonnie++ (0.99b) unstable; urgency=low
+
+ * Initial Release as a Debian package.
+
+
+0.99
+Files are created mode 0600 not 0777.
+
+Fixed some bugs in 0.98 where the results from several tests were totally
+wrong.
+
+Now the random file code will take less CPU time when there are extremely
+large numbers of files.
+
+Changed the format of all the output files slightly. Notable change is that
+the percentages of CPU time are now rounded off to the nearest percent. This
+is because it's not that accurate anyway (results that are provably more than
+1% wrong are not uncommon), and because I needed the extra 1 character per
+field. Also now it handles CPU time >100% properly. This is for SMP systems
+where more than 1 CPU is being used.  Concurrent Bonnie++ will return many
+results significantly greater than 100% on OSs that work well with SMP.
+
+Added a csv2txt.pl program. The main aim of this is to display data well
+for 80 column braille displays for the blind.
+
+Added "-q" option for quiet mode (less output).
+
+Now the "-n" option works on a multiple of 1024. So "-n 10" means create
+10240 files. This change is to allow the output to display in the same
+format and save space in display (who would want to test as a lower resolution
+than per 1024 files anyway).
+
+The -n option is now of the form "num[:max[:min]]" where max is the maximum
+size (default 0) and min is the minimum size (default 0). To simulate Squid
+use a max of 15000 and a min of 300. To simulate INN use a maximum of 4096
+and a minimum of 512.
+
+1.0 will be out soon!
+
+0.98
+Fixed a bug where the data size couldn't be an exact multiple of the size of
+each file (1 gig). Fixed a number of other minor bugs related to that and
+added more error checking as well.
+Changed the code to support up to 1000 files for the IO test, if each is a
+gig then you can test a tera-byte of data. Changing the code to have more
+than 1000 files wouldn't be that difficult to do.
+
+Use the new C++ type conversions.
+
+0.97
+I have stopped using cout/cerr and never plan to use them again. They caused
+me significant pain when trying to get it going on an ancient SGI system.
+
+Also changed the code structure a bit to make it cleaner. One advantage of
+this is that there is now a "-x" option to tell bonnie++ to run the same test
+a number of times (it's interesting to see the variance in the results).
+
+Now use fflush() after writing each set of results. This means that killing
+the program unexpectedly won't result in results being lost. Also fixes a
+strange bug related to printf() on Linux which I am still looking into.
+
+
+ -- Russell Coker <russell@coker.com.au> Wed, 13 Oct 1999 22:15:53 +0200
+
+Local variables:
+mode: debian-changelog
+End:
diff --git a/debian/changelog.txt b/debian/changelog.txt
new file mode 100644
index 0000000..62f54aa
--- /dev/null
+++ b/debian/changelog.txt
@@ -0,0 +1,380 @@
+bonnie++ (1.90d) unstable; urgency=low
+
+ * Updated standards version to 3.2.1.
+
+ * Done some serious work on porting to OS/2 and NT. This may break compile
+ on some versions of UNIX. If so I'll fix it as soon as it gets reported.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 11 Dec 2000 22:31:20 +0100
+
+bonnie++ (1.90c) unstable; urgency=low
+
+ * Rewrote bon_csv2html in C++ and made it assign colors to the fields to show
+ how fast or slow the values are (red for slow, green for fast).
+
+ -- Russell Coker <russell@coker.com.au> Mon, 11 Dec 2000 22:31:20 +0100
+
+bonnie++ (1.90b) unstable; urgency=low
+
+ * Added first stage of support for synchronising bonnie++ instances over a
+ network.
+ This required changing the way the -y option operates.
+ Also created a new Sync class to do this generically.
+
+ * Added code to check the latency of operations in micro-seconds (us).
+
+ -- Russell Coker <russell@coker.com.au> Tue, 5 Dec 2000 17:04:27 +0100
+
+bonnie++ (1.90a) unstable; urgency=low
+
+ * This is the start of the new 2.00 series of Bonnie++. The aim is to do
+ serious performance testing of RAID arrays. All programs will be multi-
+ threaded.
+
+ * Added the multi-threaded zcav that I had removed from the 1.00 series and
+ make Bonnie++ use threads instead of fork(). Next step is to add extra
+    concurrency through threads.
+
+ * Changed the format of the csv files. Added a format version number and a
+ place for the version of Bonnie++ that created the data. Made the
+ bon_csv2txt and bon_csv2html programs understand the new format.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 29 Nov 2000 23:53:13 +0100
+
+bonnie++ (1.00f) unstable; urgency=low
+
+ * Fixed the bugs in timing of seeks.
+
+ * Changed the number of seeks from 8000 to 8192.
+
+ * Now the minimum time for a test that will be considered valid is 500ms (for
+ bonnie++ and zcav), it was previously 1000ms but I have been convinced that
+ system clocks are accurate enough for this.
+
+ * Changed the default number of files created for file creation tests from
+ 30K to 16K, this change makes the test time bearable on obsolete file
+ systems like UFS but relies on the above change to work on ReiserFS on
+ Pentium4 class CPUs.
+
+ * Changed the default file size to 300M for IO tests, this reflects the
+ increase in RAM sizes over the last year.
+
+ * Added some more compile warnings and fixed some more trivial bugs.
+
+ * Made the loops: line in zcav output always be prefixed by a '#' for a
+ gnuplot comment.
+
+ * Made zcav correctly display the position in megabytes instead of block-size
+ units.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 27 Nov 2000 09:45:30 +0100
+
+bonnie++ (1.00e) unstable; urgency=low
+
+ * Now exit on sync errors.
+
+ * When directory can't be synced it will display one warning and not try to
+ sync again.
+
+ * Stopped it crashing when there is only one directory to be synced.
+
+ * Made the version number managed by autoconf so I don't release it with
+    inconsistent version numbers again!
+
+ -- Russell Coker <russell@coker.com.au> Thu, 9 Nov 2000 03:26:15 +0100
+
+bonnie++ (1.00d) unstable; urgency=low
+
+ * Use SA_RESETHAND instead of SA_ONESHOT for the benefit of Solaris.
+
+ * Added a specific rule for bon_suid.cpp as it doesn't have a header file.
+
+ * Added --prefix support to the ./configure , also made the default prefix
+ be /usr/local instead of /usr .
+
+ * Changed the autoconf support for checking for C++ slightly. It should
+ now work better for some strange setups and work the same for everyone
+ else (I hope).
+
+ * Made the autoconf tests for semaphore headers work correctly, now it
+ should compile on *BSD.
+
+ * Added --disable-stripping option for ./configure if you don't want binaries
+ stripped.
+
+ * Added autoconf checking for bool, now it should compile on AIX using ICC
+ without any special options!
+
+ * Reverted zcav to the 1.00a version and then added the code for -u, -g, and
+ the fix for large numbers of data points. The multi-threaded zcav code
+ will go into 1.90 (the pre-2.00 tree).
+ Bonnie++ versions < 1.90 will never again have threading code.
+
+ * Made bon_csv2txt use every available character for the description.
+
+ * Made it install man pages by default.
+
+ * Added sun package support - "make -C sun"!
+
+ -- Russell Coker <russell@coker.com.au> Thu, 28 Sep 2000 16:22:15 +0100
+
+bonnie++ (1.00c) unstable; urgency=low
+
+ * Closes:#53545
+ Fixed in 0.99e, should have been closed in Debian BTS.
+
+ * Closes:#53546
+ Fixed in 0.99e, should have been closed in Debian BTS.
+
+ * Closes:#61925
+ Fixed in 1.00a.
+
+ * Closes:#64995
+ It is in /usr/sbin because it can only sensibly be run by the administrator
+ of the machine, otherwise it probably will give bad results and may impact
+ the performance of the machine in question. Also it can now change
+ UID/GID. The new way of dealing with the running as root issue is in the
+ next change.
+
+ * Made zcav and bonnie++ take -u and -g options to set the UID and GID to run
+ as. For bonnie++ it is now mandatory to use the -u option when running as
+ root.
+
+  * Made bonnie++ not change its command-line.
+
+ * Documented the K and G suffixes for sizes on the command-line.
+
+ * Now the CPU time field also displays as "+++" if the elapsed time is < 1s.
+
+ * Fixed the machine-name broken-ness from 1.00b, and made the machine name
+ default to the nodename as reported by utsname() (also the output of
+ `hostname` or `uname -n`).
+
+ * Now uses sysconf() to check the ram size, you can use -r to over-ride it.
+ Also the default file size for IO tests will be twice the reported RAM
+ size or 200M (whichever is larger).
+
+ * Now Bonnie++ handles ^C from the keyboard, the XCPU and XFSZ (excessive CPU
+ and excessive file size) flags and aborts the program cleanly removing all
+ files when it receives them. Also ignores SIGHUP.
+
+ * Added AC_PROG_CC to configure.in so that it can compile on systems with
+ strange C compiler setups.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 08 Sep 2000 08:22:47 +0100
+
+bonnie++ (1.00b) unstable; urgency=low
+
+ * Added more warnings to the compile and removed assertions. Made some
+ trivial changes to the code (like changing variable names) to stop the
+ warnings.
+
+ * Fixed the memory management problem on bonnie++, these made it not work on
+ IA64 (and stopped it working correctly on most platforms). Thanks to
+ Electric Fence by Bruce Perens for the discovery of this.
+ The worst part of it was introduced in testing this version, so it only
+ hit me and my alpha-testers.
+
+ * Fixed zcav for large numbers of data points.
+
+ * Made zcav multi-threaded to test multiple hard drives at once. Changed the
+ way it works totally.
+
+ * Removed some dependencies on extensions to the C++ standard which are not
+ supported in all compilers, also removed some trivial header file issues.
+ These were found in testing on Tru64Unix.
+
+ * Fixed a bug in bonnie++, it would go into an infinite loop when the file
+ creation tests had a non-zero size.
+
+ * Made bonnie++ work for block-reads that return partial blocks, now it will
+ print an error and do another read for the rest.
+
+ * Made Bonnie++ accept machine names up to 4095 bytes and not crash if the
+ name is longer. Previously the limit was 20 bytes and it crashed when you
+ exceeded it.
+
+ * This version is fairly experimental but I'm releasing it now because I need
+ wider testing of the new features.
+
+ -- Russell Coker <russell@coker.com.au> Fri, 25 Aug 2000 12:15:06 +0200
+
+bonnie++ (1.00a) unstable; urgency=low
+
+ * Added a 30 second startup delay when run as root. A user lost some data
+ because of running it as root, if they had run it as a regular account
+ they would be OK. I don't want this to happen again.
+
+ * Zcav now displays an error if it can't read a single block.
+
+ * Added some basic autoconf support which I will use to increase portability
+ in future versions.
+
+ * Now provides zcav.
+
+ * Should compile with all old versions of gcc.
+
+ * Fixed a warning on Alpha.
+
+ -- Russell Coker <russell@coker.com.au> Mon, 24 Apr 2000 23:34:02 +0100
+
+bonnie++ (1.00) unstable; urgency=low
+
+ * Now include ZCAV in the same package. ZCAV package should disappear.
+
+ * License is now GPL. Tim Bray agrees to the GPL for his parts, the license
+ conflict was stopping me from putting ZCAV into the archive.
+
+ * ZCAV reads through a hard drive sequentially and reports the IO speeds for
+ different zones of the drive.
+
+ * Fixed a few minor issues with the documentation, and put the test programs
+ in /usr/sbin as they aren't generally run by regular users. Also use man
+ section 8.
+
+ -- Russell Coker <russell@coker.com.au> Sat, 01 Mar 2000 12:01:00 +0100
+
+bonnie++ (0.99j) unstable; urgency=low
+
+ * 0.99h core dumped when you didn't specify "-b" for file creation tests,
+ fixed.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 05 Mar 2000 11:16:42 +0100
+
+bonnie++ (0.99h) unstable; urgency=low
+
+ * Fixed a variety of bugs in the semaphore code which were introduced in
+ 0.99g.
+
+ * Fixed formatting of output.
+
+ * Added "-b" option to sync all writes.
+
+ * Changed the semaphore code to make it more easily hackable for BSD users,
+ it won't compile as-is on BSD at the moment...
+
+ -- Russell Coker <russell@coker.com.au> Sun, 05 Mar 2000 11:16:42 +0100
+
+bonnie++ (0.99g) unstable; urgency=low
+
+ * Now use getopt() for checking command-line options.
+
+ * Added new versions of fork and semaphore code, initially developed for
+ postal.
+
+ * Fixed the message that's displayed when bad command-line parameters are
+ entered.
+
+ * Version 1.[0-8]0 will use fork(). Version 1.90 and above will use POSIX
+    threads and include the concurrent bonnie++ functionality I've been
+ promising for so long.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 23 Feb 2000 22:16:23 +0100
+
+bonnie++ (0.99f) unstable; urgency=low
+
+ * Added "-f" parameter to skip per-char tests and semaphore code to
+ synchronise multiple instances of Bonnie++. Thanks to
+ Christian Kagerhuber <c.kagerhuber@t-online.net> for the patch!
+
+ * Added srand() after the fork so each child gets different random numbers.
+
+ -- Russell Coker <russell@coker.com.au> Wed, 12 Jan 2000 16:45:28 +1100
+
+bonnie++ (0.99e) unstable; urgency=low
+
+ * Fixed the operation of "-x" parameter (used to just cause crashes).
+
+ * Made it cleanly exit under some error conditions where it used to crash.
+
+ * Improved the bonnie++ man page.
+
+ * Fixed some checking of command-line parameters.
+
+ * Merged code from the OS/2 port, needs lots of testing...
+
+ -- Russell Coker <russell@coker.com.au> Wed, 12 Jan 2000 16:45:28 +1100
+
+bonnie++ (0.99d) unstable; urgency=low
+
+ * Added some more functionality. Tests hard and soft link creation.
+
+ * Fixed CSV output of <100 seeks per second.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 21 Nov 1999 22:37:42 +0200
+
+bonnie++ (0.99c) unstable; urgency=low
+
+ * Fix some bugs with big IO (fseek related) and include man pages.
+
+ * Made it always print the CSV data.
+
+ -- Russell Coker <russell@coker.com.au> Sun, 21 Nov 1999 22:37:42 +0200
+
+bonnie++ (0.99b) unstable; urgency=low
+
+ * Initial Release as a Debian package.
+
+
+0.99
+Files are created mode 0600 not 0777.
+
+Fixed some bugs in 0.98 where the results from several tests were totally
+wrong.
+
+Now the random file code will take less CPU time when there are extremely
+large numbers of files.
+
+Changed the format of all the output files slightly. Notable change is that
+the percentages of CPU time are now rounded off to the nearest percent. This
+is because it's not that accurate anyway (results that are provably more than
+1% wrong are not uncommon), and because I needed the extra 1 character per
+field. Also now it handles CPU time >100% properly. This is for SMP systems
+where more than 1 CPU is being used.  Concurrent Bonnie++ will return many
+results significantly greater than 100% on OSs that work well with SMP.
+
+Added a csv2txt.pl program. The main aim of this is to display data well
+for 80 column braille displays for the blind.
+
+Added "-q" option for quiet mode (less output).
+
+Now the "-n" option works on a multiple of 1024. So "-n 10" means create
+10240 files. This change is to allow the output to display in the same
+format and save space in display (who would want to test at a lower resolution
+than per 1024 files anyway).
+
+The -n option is now of the form "num[:max[:min]]" where max is the maximum
+size (default 0) and min is the minimum size (default 0). To simulate Squid
+use a max of 15000 and a min of 300. To simulate INN use a maximum of 4096
+and a minimum of 512.
+
+1.0 will be out soon!
+
+0.98
+Fixed a bug where the data size couldn't be an exact multiple of the size of
+each file (1 gig). Fixed a number of other minor bugs related to that and
+added more error checking as well.
+Changed the code to support up to 1000 files for the IO test, if each is a
+gig then you can test a tera-byte of data. Changing the code to have more
+than 1000 files wouldn't be that difficult to do.
+
+Use the new C++ type conversions.
+
+0.97
+I have stopped using cout/cerr and never plan to use them again. They caused
+me significant pain when trying to get it going on an ancient SGI system.
+
+Also changed the code structure a bit to make it cleaner. One advantage of
+this is that there is now a "-x" option to tell bonnie++ to run the same test
+a number of times (it's interesting to see the variance in the results).
+
+Now use fflush() after writing each set of results. This means that killing
+the program unexpectedly won't result in results being lost. Also fixes a
+strange bug related to printf() on Linux which I am still looking into.
+
+
+ -- Russell Coker <russell@coker.com.au> Wed, 13 Oct 1999 22:15:53 +0200
+
+Local variables:
+mode: debian-changelog
+End:
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..4150347
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,21 @@
+Source: bonnie++
+Section: utils
+Priority: optional
+Maintainer: Russell Coker <russell@coker.com.au>
+Standards-Version: 3.5.6
+Build-Depends: debhelper
+
+Package: bonnie++
+Architecture: any
+Depends: ${shlibs:Depends}
+Conflicts: zcav, bonnie, bonnie++
+Replaces: zcav, bonnie
+Provides: zcav, bonnie
+Description: Hard drive benchmark suite
+ Based on the old Bonnie benchmark, Bonnie++ is a toolkit for testing
+ hard drive and file system performance.
+ .
+ As well as bonnie++ itself and some output filters, the suite provides
+ some other benchmarking tools including zcav, which can compare the
+ raw-read performance of different zones on a drive, and gives output
+ suitable for gnuplot.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..7eb5b28
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,12 @@
+This package was debianized by Russell Coker <russell@coker.com.au> on
+Wed, 13 Oct 1999 22:15:53 +0200.
+
+It was downloaded from http://www.coker.com.au/bonnie++/
+
+Upstream Author(s): Russell Coker <russell@coker.com.au>
+
+Copyright:
+
+GPL 2.0
+
+See /usr/share/common-licenses/GPL-2
diff --git a/debian/dirs b/debian/dirs
new file mode 100644
index 0000000..e78db9b
--- /dev/null
+++ b/debian/dirs
@@ -0,0 +1,2 @@
+usr/bin
+usr/share/doc
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..4cf61f8
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,2 @@
+readme.html
+credits.txt
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..9f41d8b
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,80 @@
+#!/usr/bin/make -f
+# Sample debian/rules that uses debhelper.
+# GNU copyright 1997 to 1999 by Joey Hess.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+# This is the debhelper compatibility version to use.
+export DH_COMPAT=4
+
+build: build-stamp
+build-stamp:
+ dh_testdir
+
+
+ # Add here commands to compile the package.
+ ./configure --prefix=`pwd`/debian/bonnie++/usr --mandir=`pwd`/debian/bonnie++/usr/share/man
+ $(MAKE)
+
+ touch build-stamp
+
+clean:
+ dh_testdir
+ dh_testroot
+ rm -f build-stamp
+
+ # Add here commands to clean up after the build process.
+ -$(MAKE) clean
+ rm -f config.*
+
+ dh_clean
+
+install: build
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+ dh_installdirs
+
+ # Add here commands to install the package into debian/bonnie++.
+ $(MAKE) install-bin
+ ln -s bonnie++ debian/bonnie++/usr/sbin/bonnie
+
+ touch install-stamp
+
+# Build architecture-independent files here.
+binary-indep: build install
+# We have nothing to do by default.
+
+# Build architecture-dependent files here.
+binary-arch: build install
+# dh_testversion
+ dh_testdir
+ dh_testroot
+# dh_installdebconf
+ dh_installdocs
+# dh_installexamples
+# dh_installmenu
+# dh_installemacsen
+# dh_installpam
+# dh_installinit
+# dh_installcron
+ dh_installman *.1 *.8
+# dh_installinfo
+# dh_undocumented
+ dh_installchangelogs
+ dh_strip
+ dh_compress
+ dh_fixperms
+ # You may want to make some executables suid here.
+# dh_suidregister
+# dh_makeshlibs
+ dh_installdeb
+# dh_perl
+ dh_shlibdeps
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install
diff --git a/depends b/depends
new file mode 100644
index 0000000..6ffa5e4
--- /dev/null
+++ b/depends
@@ -0,0 +1,18 @@
+# DO NOT DELETE
+
+bon_add.o: bonnie.h port.h
+bon_csv2html.o: bonnie.h port.h
+bon_file.o: bonnie.h port.h bon_file.h bon_time.h duration.h rand.h
+bon_io.o: bonnie.h port.h sync.h semaphore.h bon_io.h thread.h bon_time.h
+bon_io.o: duration.h rand.h
+bon_suid.o: bonnie.h port.h
+bon_time.o: bon_time.h bonnie.h port.h duration.h rand.h
+bonnie++.o: bonnie.h port.h sync.h semaphore.h bon_io.h thread.h bon_file.h
+bonnie++.o: bon_time.h duration.h rand.h
+duration.o: duration.h port.h
+rand.o: rand.h port.h
+semaphore.o: port.h semaphore.h
+sync.o: port.h semaphore.h sync.h
+thread.o: thread.h port.h
+zcav.o: port.h zcav_io.h bonnie.h duration.h thread.h
+zcav_io.o: zcav_io.h bonnie.h port.h duration.h
diff --git a/duration.cpp b/duration.cpp
new file mode 100644
index 0000000..efa3fd3
--- /dev/null
+++ b/duration.cpp
@@ -0,0 +1,61 @@
+using namespace std;
+
+#include <stdlib.h>
+
+#include "duration.h"
+#define TIMEVAL_TO_DOUBLE(XX) (double((XX).tv_sec) + double((XX).tv_usec) / 1000000.0)
+
+#include "conf.h"
+#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#ifdef HAVE_ALGORITHM
+#include <algorithm>
+#else
+#ifdef HAVE_ALGO
+#include <algo>
+#else
+#include <algo.h>
+#endif
+#endif
+
+Duration_Base::Duration_Base()
+ : m_start(0.0)
+ , m_max(0.0)
+{
+}
+
+double Duration_Base::start()
+{
+ getTime(&m_start);
+ return m_start;
+}
+
+double Duration_Base::stop()
+{
+ double tv;
+ getTime(&tv);
+ double ret;
+ ret = tv - m_start;
+ m_max = __max(m_max, ret);
+ return ret;
+}
+
+bool Duration::getTime(double *tv)
+{
+ TIMEVAL_TYPE t;
+ if (gettimeofday(&t, static_cast<struct timezone *>(NULL)) == -1)
+ return true;
+ *tv = TIMEVAL_TO_DOUBLE(t);
+ return false;
+}
+
+bool CPU_Duration::getTime(double *tv)
+{
+ struct rusage res_usage;
+
+ getrusage(RUSAGE_SELF, &res_usage);
+ *tv = TIMEVAL_TO_DOUBLE(res_usage.ru_utime) + TIMEVAL_TO_DOUBLE(res_usage.ru_stime);
+ return false;
+}
diff --git a/duration.h b/duration.h
new file mode 100644
index 0000000..6246bff
--- /dev/null
+++ b/duration.h
@@ -0,0 +1,38 @@
+#ifndef DURATION_H
+#define DURATION_H
+
+#include "port.h"
+
+#include <sys/time.h>
+#include <unistd.h>
+
+class Duration_Base
+{
+public:
+ Duration_Base();
+ virtual ~Duration_Base() {};
+ void reset(){ m_max = 0.0; }
+ double start();
+ double stop();
+ double getMax() { return m_max; }
+
+ virtual bool getTime(double *tv) = 0;
+
+private:
+ double m_start;
+ double m_max;
+};
+
+class Duration : public Duration_Base
+{
+public:
+ virtual bool getTime(double *time);
+};
+
+class CPU_Duration : public Duration_Base
+{
+public:
+ virtual bool getTime(double *time);
+};
+
+#endif
diff --git a/generate_randfile.1 b/generate_randfile.1
new file mode 100644
index 0000000..78a8121
--- /dev/null
+++ b/generate_randfile.1
@@ -0,0 +1,41 @@
+.TH generate_randfile 1
+.SH "NAME"
+.BR generate_randfile
+\- a program to generate a file of random data
+.P
+
+.SH "SYNOPSIS"
+.BR generate_randfile
+[
+.BR \-s
+seed] [
+.BR \-f
+file]
+.BR count
+
+.SH "DESCRIPTION"
+Produces a file comprised of random integers in network-byte-order from the
+rand(3) library call.
+
+.SH "OPTIONS"
+.TP
+.B \-s
+the seed to use to create the random numbers.
+.TP
+.B \-f
+the file to create for writing the output (default is to use standard output).
+.TP
+.B count
+the number of integers to write.
+
+.SH "SEE ALSO"
+.BR rand (3)
+
+.SH "AUTHOR"
+These programs were written by Russell Coker <russell@coker.com.au>. May be
+freely used and distributed without restriction.
+
+.SH "AVAILABILITY"
+The source is available from http://www.coker.com.au/bonnie++ .
+.P
+See http://etbe.coker.com.au/category/benchmark for further information.
diff --git a/generate_randfile.cpp b/generate_randfile.cpp
new file mode 100644
index 0000000..c0f9421
--- /dev/null
+++ b/generate_randfile.cpp
@@ -0,0 +1,58 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <netinet/in.h>
+
+void usage()
+{
+ fprintf(stderr, "Usage: generate_randfile [-s seed] [-f file] count\n");
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+ if(argc < 2)
+ {
+ usage();
+ }
+ unsigned int seed = getpid() ^ time(NULL);
+ FILE *fp = stdout;
+ int int_c;
+ while(-1 != (int_c = getopt(argc, argv, "s:f:")) )
+ {
+ switch(char(int_c))
+ {
+ case '?':
+ case ':':
+ usage();
+ break;
+ case 's':
+ if(sscanf(optarg, "%u", &seed) != 1)
+ usage();
+ break;
+ case 'f':
+ fp = fopen(optarg, "w");
+ if(fp == NULL)
+ usage();
+ break;
+ }
+ }
+ if(optind >= argc)
+ usage();
+ int count = atoi(argv[optind]);
+ srand(seed);
+ fprintf(stderr, "Generating %d random numbers with seed %d.\n", count, seed);
+ for(int i = 0; i < count; i++)
+ {
+ unsigned long val = htonl(rand());
+ if(fwrite(&val, sizeof(val), 1, fp) != 1)
+ {
+ fprintf(stderr, "Can't write item %d.\n", i);
+ return 1;
+ }
+ }
+ if(fp != stdout)
+ fclose(fp);
+ return 0;
+}
diff --git a/getc_putc.8 b/getc_putc.8
new file mode 100644
index 0000000..7b9466f
--- /dev/null
+++ b/getc_putc.8
@@ -0,0 +1,109 @@
+.TH getc_putc 8
+.SH "NAME"
+getc_putc \- program to test hard drive performance.
+
+.SH "SYNOPSIS"
+.B getc_putc
+.I [\-d dir] [\-s size(KiB)] [-m machine-name]
+.I [\-u uid\-to\-use:gid\-to\-use] [\-g gid\-to\-use]
+
+.SH "DESCRIPTION"
+This manual page documents briefly the
+.BR getc_putc ,
+program.
+.P
+This is a simple adjunct to the
+.B bonnie++
+benchmark. It is used to test various ways of doing IO one byte at a time,
+usually you don't need to do enough of this for it to be a performance issue
+for it to matter much which way you do it. But sometimes it's necessary (for
+example when parsing IO from a terminal and then launching another process
+which will take over all IO, such as a simple shell).
+.P
+The real benefits of this are to help settle some arguments about the
+performance of such things, and to educate novices about how bad per-byte IO
+really is.
+
+.SH "OPTIONS"
+For getc_putc every option is of the form of a hyphen followed by a letter and
+then the next parameter contains the value.
+.TP
+.B \-d
+the directory to use for the tests.
+.TP
+.B \-s
+the size of the file for byte IO performance measured in kilobytes. NB You can
+specify the size in mega\-bytes if you add 'm' to the end of the number.
+
+The default for this test is to test with a 40MiB file. Of the file only 1/32
+of it will be used for write() and read() system calls (anything else takes
+too long), and only 1/4 of it will be used for locked getc() and putc().
+
+.TP
+.B \-m
+name of the machine \- for display purposes only.
+
+.TP
+.B \-u
+user\-id to use. When running as root specify the UID to use for the tests. It
+is not recommended to use root, so if you really want to run as root then use
+.B \-u root.
+Also if you want to specify the group to run as then use the
+.B user:group
+format. If you specify a user by name but no group then the primary group of
+that user will be chosen. If you specify a user by number and no group then
+the group will be
+.B nogroup.
+
+.TP
+.B \-g
+group\-id to use. Same as using
+.B :group
+for the
+.B \-u
+parameter, just a different way to specify it for compatibility with other
+programs.
+
+.TP
+.B \-q
+quiet mode. If specified then some of the extra informational messages will be
+suppressed. Also the csv data will be the only output on standard out and the
+plain text data will be on standard error. This means you can run
+.B getc_putc \-q >> file.csv
+to record your csv data.
+
+.SH "OUTPUT"
+The primary output is plain\-text in 80 columns which is designed to fit well
+when pasted into email and which will work well with Braille displays.
+.P
+The second type of output is CSV (Comma Separated Values).  This can easily be
+imported into any spread\-sheet or database program.
+.P
+For every test the result is a speed in KiB/s. I do not display the CPU time
+because it presumably is 99% of the power of a single CPU (or something very
+close to that).
+
+.SH "AUTHOR"
+This program, its manual page, and the Debian package were written by
+Russell Coker <russell@coker.com.au>.
+.P
+The documentation, the Perl scripts, and all the code for testing the creation
+of thousands of files was written by Russell Coker, but the entire package is
+under joint copyright with Tim Bray.
+
+.SH "SIGNALS"
+Handles SIGINT and does a cleanup (which may take some time), a second SIGINT
+or a SIGQUIT will cause it to immediately die.
+.P
+SIGXCPU and SIGXFSZ act like SIGINT.
+.P
+Ignores SIGHUP.
+
+.SH "AVAILABILITY"
+The source is available from http://www.coker.com.au/bonnie++ .
+.P
+See http://etbe.coker.com.au/category/benchmark for further information.
+
+.SH "SEE ALSO"
+.BR bonnie++ (8),
+.BR zcav (8)
diff --git a/getc_putc.cpp b/getc_putc.cpp
new file mode 100644
index 0000000..6629cf8
--- /dev/null
+++ b/getc_putc.cpp
@@ -0,0 +1,284 @@
+#include "bonnie.h"
+
+#include <unistd.h>
+#include <sys/utsname.h>
+#include <stdlib.h>
+#include <cstring>
+#include <vector>
+
+#include "duration.h"
+#include "getc_putc.h"
+
+static void usage()
+{
+ fprintf(stderr, "usage:\n"
+ "getc_putc [-d scratch-dir] [-s size(KiB)] [-m machine-name]\n"
+ "[-u uid-to-use:gid-to-use] [-g gid-to-use]\n"
+ "\nVersion: " BON_VERSION "\n");
+ exit(eParam);
+}
+
+enum getc_tests_t
+{
+ Write = 0,
+ Read,
+ PutcNoTh,
+ GetcNoTh,
+ Putc,
+ Getc,
+ PutcUnlocked,
+ GetcUnlocked,
+ GetcTestCount
+};
+
+static void print_stat(FILE *fp, double elapsed, int test_size, bool csv);
+static void print_all_res(CPCCHAR machine, FILE *fp, double *res, int size, bool csv);
+
+#define WRITE_SIZE_FACT 32
+#define GETC_SIZE_FACT 4
+
+int main(int argc, char *argv[])
+{
+ int file_size = 40 << 10;
+ PCCHAR dir = ".";
+ bool quiet = false;
+ char *userName = NULL, *groupName = NULL;
+ PCCHAR machine = NULL;
+
+ int int_c;
+ while(-1 != (int_c = getopt(argc, argv, "d:s:u:g:m:q")) )
+ {
+ switch(char(int_c))
+ {
+ case '?':
+ case ':':
+ usage();
+ break;
+ case 'd':
+ dir = optarg;
+ break;
+ case 's':
+ file_size = size_from_str(optarg, "m");
+ break;
+ case 'q':
+ quiet = true;
+ break;
+ case 'm':
+ machine = optarg;
+ break;
+ case 'g':
+ if(groupName)
+ usage();
+ groupName = optarg;
+ break;
+ case 'u':
+ {
+ if(userName)
+ usage();
+ userName = strdup(optarg);
+ int i;
+ for(i = 0; userName[i] && userName[i] != ':'; i++) {}
+
+ if(userName[i] == ':')
+ {
+ if(groupName)
+ usage();
+ userName[i] = '\0';
+ groupName = &userName[i + 1];
+ }
+ }
+ break;
+ }
+ }
+
+ if(userName || groupName)
+ {
+ if(bon_setugid(userName, groupName, quiet))
+ return 1;
+ if(userName)
+ free(userName);
+ }
+ else if(geteuid() == 0)
+ {
+ fprintf(stderr, "You must use the \"-u\" switch when running as root.\n");
+ usage();
+ }
+
+ if(machine == NULL)
+ {
+ struct utsname utsBuf;
+ if(uname(&utsBuf) != -1)
+ machine = utsBuf.nodename;
+ }
+
+ file_size -= (file_size % WRITE_SIZE_FACT);
+ file_size = file_size << 10;
+ if(!file_size)
+ usage();
+
+ char *fname = new char[22 + strlen(dir)];
+
+ sprintf(fname, "%s/getc_putc.%d", dir, getpid());
+
+ int fd = open(fname, O_CREAT | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if(fd < 0)
+ {
+ fprintf(stderr, "Can't create file \"%s\".\n", fname);
+ usage();
+ }
+ if(dup2(fd, FILE_FD) != FILE_FD)
+ {
+ fprintf(stderr, "Can't dup2() the file handle.");
+ return 1;
+ }
+ close(fd);
+
+ if(!quiet)
+ printf("Extending file...");
+ fflush(NULL);
+ char buf[1 << 20];
+
+ int size = 0, wrote;
+ while(size < file_size)
+ {
+ wrote = write(FILE_FD, buf, min(sizeof(buf), (size_t)file_size - size));
+ if(wrote < 0)
+ {
+ fprintf(stderr, "Can't extend file - disk full?\n");
+ return 1;
+ }
+ size += wrote;
+ }
+ fsync(FILE_FD);
+ volatile char c;
+ int i;
+ Duration dur;
+ double res[GetcTestCount];
+
+ if(lseek(FILE_FD, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ size = file_size / WRITE_SIZE_FACT;
+ TEST_FUNC_WRITE("write(fd, &c, 1)", if(write(FILE_FD, (void *)&c, 1) != 1), res[Write]);
+ fsync(FILE_FD);
+ if(lseek(FILE_FD, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ TEST_FUNC_READ("read(fd, &c, 1)", if(read(FILE_FD, (void *)&c, 1) != 1), res[Read]);
+
+ char *prog = new char[strlen(argv[0]) + 30];
+ sprintf(prog, "%s_helper %d", argv[0], file_size);
+ if(quiet)
+ strcat(prog, "q");
+ FILE *child = popen(prog, "r");
+ if(!child)
+ {
+ fprintf(stderr, "Can't execute \"%s\".\n", prog);
+ return 1;
+ }
+ if(fread(&res[PutcNoTh], sizeof(double) * 2, 1, child) != 1)
+ {
+ fprintf(stderr, "Can't get results from child.\n");
+ return 1;
+ }
+ fclose(child);
+
+ FILE *fp = fdopen(FILE_FD, "w+");
+ if(!fp)
+ {
+ fprintf(stderr, "Can't reopen for putc.\n");
+ return 1;
+ }
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ size = file_size / GETC_SIZE_FACT;
+ TEST_FUNC_WRITE("putc(c, fp)", if(putc(c, fp) == EOF), res[Putc]);
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ TEST_FUNC_READ("getc()", if( (c = getc(fp)) == EOF), res[Getc]);
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ size = file_size;
+ TEST_FUNC_WRITE("putc_unlocked(c, fp)", if(putc_unlocked(c, fp) == EOF), res[PutcUnlocked]);
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ TEST_FUNC_READ("getc_unlocked()", if( (c = getc_unlocked(fp)) == EOF), res[GetcUnlocked]);
+
+ if(!quiet)
+ printf("done\n");
+ fclose(fp);
+ unlink(fname);
+ size = size / 1024;
+ print_all_res(machine, stderr, res, size, false);
+ print_all_res(machine, stdout, res, size, true);
+
+ return 0;
+}
+
+static void print_all_res(CPCCHAR machine, FILE *fp, double *res, int size, bool csv)
+{
+ if(!csv)
+ {
+ fprintf(fp, "Version %5s write read putcNT getcNT putc getc putcU getcU\n", BON_VERSION);
+ fprintf(fp, "%-20s ", machine);
+ }
+ else
+ {
+ fprintf(fp, "%s", machine);
+ }
+ print_stat(fp, res[Write], size / WRITE_SIZE_FACT, csv);
+ print_stat(fp, res[Read], size / WRITE_SIZE_FACT, csv);
+ print_stat(fp, res[PutcNoTh], size, csv);
+ print_stat(fp, res[GetcNoTh], size, csv);
+ print_stat(fp, res[Putc], size / GETC_SIZE_FACT, csv);
+ print_stat(fp, res[Getc], size / GETC_SIZE_FACT, csv);
+ print_stat(fp, res[PutcUnlocked], size, csv);
+ print_stat(fp, res[GetcUnlocked], size, csv);
+ fprintf(fp, "\n");
+}
+
+static void print_stat(FILE *fp, double elapsed, int test_size, bool csv)
+{
+ if(elapsed == 0.0)
+ {
+ if(!csv)
+ fprintf(fp, " ");
+ else
+ fprintf(fp, ",");
+ }
+ else if(elapsed < MinTime)
+ {
+ if(!csv)
+ fprintf(fp, " ++++++");
+ else
+ fprintf(fp, ",++++++");
+ }
+ else
+ {
+ double speed = double(test_size) / elapsed;
+ if(!csv)
+ fprintf(fp, " %6d", int(speed));
+ else
+ fprintf(fp, ",%d", int(speed));
+ }
+}
diff --git a/getc_putc.h b/getc_putc.h
new file mode 100644
index 0000000..1dabd43
--- /dev/null
+++ b/getc_putc.h
@@ -0,0 +1,29 @@
+#ifndef GETC_PUTC_H
+#define GETC_PUTC_H
+
+#define FILE_FD 253
+
+#define TEST_FUNC(XACTION, XNAME, XCODE, XRES) \
+ if(!quiet) fprintf(stderr, "done\n%s with %s...", XACTION, XNAME); \
+ fflush(NULL); \
+ dur.reset(); \
+ dur.start(); \
+ for(i = 0; i < size; i++) \
+ { \
+ XCODE \
+ { \
+ fprintf(stderr, "Can't %s!\n", XNAME); \
+ return 1; \
+ } \
+ c++; \
+ } \
+ XRES = dur.stop();
+
+#define TEST_FUNC_WRITE(XNAME, XCODE, XRES) \
+ TEST_FUNC("Writing", XNAME, c = 0x20 + (i & 0x3f); XCODE, XRES)
+
+#define TEST_FUNC_READ(XNAME, XCODE, XRES) \
+ TEST_FUNC("Reading", XNAME, XCODE, XRES)
+
+
+#endif
diff --git a/getc_putc_helper.cpp b/getc_putc_helper.cpp
new file mode 100644
index 0000000..efc4952
--- /dev/null
+++ b/getc_putc_helper.cpp
@@ -0,0 +1,64 @@
+#include "bonnie.h"
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <cstring>
+#include "duration.h"
+#include <vector>
+#include "getc_putc.h"
+
+int main(int argc, char *argv[])
+{
+ if(argc != 2)
+ {
+ fprintf(stderr, "Error - don't run this yourself, run getc_putc!\n");
+ return 1;
+ }
+
+ int size = atoi(argv[1]);
+ bool quiet = false;
+
+ if(argv[1][strlen(argv[1]) - 1] == 'q')
+ quiet = true;
+
+ volatile char c;
+ int i;
+ Duration dur;
+ double res[2];
+
+ FILE *fp = fdopen(FILE_FD, "w+");
+ if(!fp)
+ {
+ fprintf(stderr, "Can't reopen for putc.\n");
+ return 1;
+ }
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ TEST_FUNC_WRITE("putc(c, fp) no thread", if(putc(c, fp) == EOF), res[0]);
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+ TEST_FUNC_READ("getc() no thread", if( (c = getc(fp)) == EOF), res[1]);
+ if(fseek(fp, 0, SEEK_SET) != 0)
+ {
+ fprintf(stderr, "Can't seek.\n");
+ return 1;
+ }
+ fflush(NULL);
+
+ if(write(1, res, sizeof(res)) != sizeof(res))
+ {
+ fprintf(stderr, "Can't write results to parent process.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/install.sh b/install.sh
new file mode 100644
index 0000000..0c85d1d
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,164 @@
+##
+## install -- Install a program, script or datafile
+## Copyright (c) 1997-2000 Ralf S. Engelschall <rse@engelschall.com>
+## Originally written for shtool
+##
+## This file is part of shtool and free software; you can redistribute
+## it and/or modify it under the terms of the GNU General Public
+## License as published by the Free Software Foundation; either version
+## 2 of the License, or (at your option) any later version.
+##
+## This file is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+## USA, or contact Ralf S. Engelschall <rse@engelschall.com>.
+##
+
+str_tool="install"
+str_usage="[-v] [-t] [-c] [-C] [-s] [-m<mode>] [-o<owner>] [-g<group>] [-e<ext>] <file> [<file> ...] <path>"
+arg_spec="2+"
+opt_spec="v.t.c.C.s.m:o:g:e:"
+opt_v=no
+opt_t=no
+opt_c=no
+opt_C=no
+opt_s=no
+opt_m=""
+opt_o=""
+opt_g=""
+opt_e=""
+
+. ./sh.common
+
+# determine source(s) and destination
+argc=$#
+srcs=""
+while [ $# -gt 1 ]; do
+ srcs="$srcs $1"
+ shift
+done
+dstpath="$1"
+
+# type check for destination
+dstisdir=0
+if [ -d $dstpath ]; then
+ dstpath=`echo "$dstpath" | sed -e 's:/$::'`
+ dstisdir=1
+fi
+
+# consistency check for destination
+if [ $argc -gt 2 -a $dstisdir = 0 ]; then
+ echo "$msgprefix:Error: multiple sources require destination to be directory" 1>&2
+ exit 1
+fi
+
+# iterate over all source(s)
+for src in $srcs; do
+ dst=$dstpath
+
+ # If destination is a directory, append the input filename
+ if [ $dstisdir = 1 ]; then
+ dstfile=`echo "$src" | sed -e 's;.*/\([^/]*\)$;\1;'`
+ dst="$dst/$dstfile"
+ fi
+
+ # Add a possible extension to src and dst
+ if [ ".$opt_e" != . ]; then
+ src="$src$opt_e"
+ dst="$dst$opt_e"
+ fi
+
+ # Check for correct arguments
+ if [ ".$src" = ".$dst" ]; then
+ echo "$msgprefix:Warning: source and destination are the same - skipped" 1>&2
+ continue
+ fi
+ if [ -d "$src" ]; then
+ echo "$msgprefix:Warning: source \`$src' is a directory - skipped" 1>&2
+ continue
+ fi
+
+ # Make a temp file name in the destination directory
+ dsttmp=`echo $dst |\
+ sed -e 's;[^/]*$;;' -e 's;\(.\)/$;\1;' -e 's;^$;.;' \
+ -e "s;\$;/#INST@$$#;"`
+
+ # Verbosity
+ if [ ".$opt_v" = .yes ]; then
+ echo "$src -> $dst" 1>&2
+ fi
+
+ # Copy or move the file name to the temp name
+ # (because we might be not allowed to change the source)
+ if [ ".$opt_C" = .yes ]; then
+ opt_c=yes
+ fi
+ if [ ".$opt_c" = .yes ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "cp $src $dsttmp" 1>&2
+ fi
+ cp $src $dsttmp || exit $?
+ else
+ if [ ".$opt_t" = .yes ]; then
+ echo "mv $src $dsttmp" 1>&2
+ fi
+ mv $src $dsttmp || exit $?
+ fi
+
+ # Adjust the target file
+ # (we do chmod last to preserve setuid bits)
+ if [ ".$opt_s" = .yes ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "strip $dsttmp" 1>&2
+ fi
+ strip $dsttmp || exit $?
+ fi
+ if [ ".$opt_o" != . ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "chown $opt_o $dsttmp" 1>&2
+ fi
+ chown $opt_o $dsttmp || exit $?
+ fi
+ if [ ".$opt_g" != . ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "chgrp $opt_g $dsttmp" 1>&2
+ fi
+ chgrp $opt_g $dsttmp || exit $?
+ fi
+ if [ ".$opt_m" != . ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "chmod $opt_m $dsttmp" 1>&2
+ fi
+ chmod $opt_m $dsttmp || exit $?
+ fi
+
+ # Determine whether to do a quick install
+ # (has to be done _after_ the strip was already done)
+ quick=no
+ if [ ".$opt_C" = .yes ]; then
+ if [ -r $dst ]; then
+ if cmp -s $src $dst; then
+ quick=yes
+ fi
+ fi
+ fi
+
+ # Finally install the file to the real destination
+ if [ $quick = yes ]; then
+ if [ ".$opt_t" = .yes ]; then
+ echo "rm -f $dsttmp" 1>&2
+ fi
+ rm -f $dsttmp
+ else
+ if [ ".$opt_t" = .yes ]; then
+ echo "rm -f $dst && mv $dsttmp $dst" 1>&2
+ fi
+ rm -f $dst && mv $dsttmp $dst
+ fi
+done
+
diff --git a/port.h b/port.h
new file mode 100644
index 0000000..8d53622
--- /dev/null
+++ b/port.h
@@ -0,0 +1,74 @@
+#ifndef PORT_UNIX_H
+#define PORT_UNIX_H
+
+#include "conf.h"
+
+#ifndef HAVE_MIN_MAX
+#if defined(HAVE_ALGO_H) || defined(HAVE_ALGO)
+#ifdef HAVE_ALGO
+#include <algo>
+#else
+#include <algo.h>
+#endif
+#else
+#define min(XX,YY) ((XX) < (YY) ? (XX) : (YY))
+#define max(XX,YY) ((XX) > (YY) ? (XX) : (YY))
+#endif
+#endif
+
+
+
+
+#ifndef _LARGEFILE64_SOURCE
+#define _LARGEFILE64_SOURCE
+#endif
+#ifdef _LARGEFILE64_SOURCE
+#define OFF_T_PRINTF "%lld"
+#else
+#define OFF_T_PRINTF "%d"
+#endif
+
+#if 0
+#define false 0
+#define true 1
+#endif
+
+// UNIX here
+typedef struct timeval TIMEVAL_TYPE;
+
+#ifdef _LARGEFILE64_SOURCE
+#define OFF_TYPE off64_t
+#define file_lseek lseek64
+#define file_creat creat64
+#define file_open open64
+#else
+#define OFF_TYPE off_t
+#define file_lseek lseek
+#define file_creat creat
+#define file_open open
+#endif
+
+typedef int FILE_TYPE;
+#define __min min
+#define __max max
+typedef unsigned int UINT;
+typedef unsigned long ULONG;
+typedef const char * PCCHAR;
+typedef char * PCHAR;
+typedef PCHAR const CPCHAR;
+typedef PCCHAR const CPCCHAR;
+typedef void * PVOID;
+typedef PVOID const CPVOID;
+typedef const CPVOID CPCVOID;
+
+typedef FILE_TYPE *PFILE_TYPE;
+
+#define _strdup strdup
+
+#ifdef NO_SNPRINTF
+#define _snprintf sprintf
+#else
+#define _snprintf snprintf
+#endif
+
+#endif
diff --git a/port.h.in b/port.h.in
new file mode 100644
index 0000000..69c8f24
--- /dev/null
+++ b/port.h.in
@@ -0,0 +1,74 @@
+#ifndef PORT_UNIX_H
+#define PORT_UNIX_H
+
+#include "conf.h"
+
+#ifndef HAVE_MIN_MAX
+#if defined(HAVE_ALGO_H) || defined(HAVE_ALGO)
+#ifdef HAVE_ALGO
+#include <algo>
+#else
+#include <algo.h>
+#endif
+#else
+#define min(XX,YY) ((XX) < (YY) ? (XX) : (YY))
+#define max(XX,YY) ((XX) > (YY) ? (XX) : (YY))
+#endif
+#endif
+
+@semun@
+@bool@
+@snprintf@
+#ifndef _LARGEFILE64_SOURCE
+@large_file@
+#endif
+#ifdef _LARGEFILE64_SOURCE
+#define OFF_T_PRINTF "%lld"
+#else
+#define OFF_T_PRINTF "%d"
+#endif
+
+#if @true_false@
+#define false 0
+#define true 1
+#endif
+
+// UNIX here
+typedef struct timeval TIMEVAL_TYPE;
+
+#ifdef _LARGEFILE64_SOURCE
+#define OFF_TYPE off64_t
+#define file_lseek lseek64
+#define file_creat creat64
+#define file_open open64
+#else
+#define OFF_TYPE off_t
+#define file_lseek lseek
+#define file_creat creat
+#define file_open open
+#endif
+
+typedef int FILE_TYPE;
+#define __min min
+#define __max max
+typedef unsigned int UINT;
+typedef unsigned long ULONG;
+typedef const char * PCCHAR;
+typedef char * PCHAR;
+typedef PCHAR const CPCHAR;
+typedef PCCHAR const CPCCHAR;
+typedef void * PVOID;
+typedef PVOID const CPVOID;
+typedef const CPVOID CPCVOID;
+
+typedef FILE_TYPE *PFILE_TYPE;
+
+#define _strdup strdup
+
+#ifdef NO_SNPRINTF
+#define _snprintf sprintf
+#else
+#define _snprintf snprintf
+#endif
+
+#endif
diff --git a/rand.cpp b/rand.cpp
new file mode 100644
index 0000000..e94ad46
--- /dev/null
+++ b/rand.cpp
@@ -0,0 +1,56 @@
+#include "rand.h"
+#include <unistd.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+bool Rand::seedFile(CPCCHAR name)
+{
+ int fd = file_open(name, O_RDONLY);
+ struct stat buf;
+ if(fd == -1 || fstat(fd, &buf) == -1)
+ {
+ fprintf(stderr, "Can't open random file \"%s\".\n", name);
+ if(fd != -1)
+ close(fd);
+ return true;
+ }
+ int size = buf.st_size / sizeof(int);
+ delete(m_arr);
+ m_arr = new int[size];
+ m_size = size;
+ if(size_t(read(fd, m_arr, size * sizeof(int))) != size * sizeof(int))
+ {
+ fprintf(stderr, "Can't read random data from \"%s\".\n", name);
+ return true;
+ }
+ for(int i = 0; i < size; i++)
+ {
+ m_arr[i] = abs(int(ntohl(m_arr[i])));
+ }
+ close(fd);
+ m_ind = -1;
+ m_name = string(name);
+ return false;
+}
+
+void Rand::seedNum(UINT num)
+{
+ delete(m_arr);
+ m_arr = NULL;
+ m_size = 0;
+ srand(num);
+ m_init = num;
+ char buf[12];
+ sprintf(buf, "%u", num);
+ m_name = string(buf);
+}
+
+void Rand::reset()
+{
+ if(m_arr)
+ m_ind = -1;
+ else
+ srand(m_init);
+}
diff --git a/rand.h b/rand.h
new file mode 100644
index 0000000..3359f55
--- /dev/null
+++ b/rand.h
@@ -0,0 +1,49 @@
+#ifndef RAND_H
+#define RAND_H
+
+using namespace std;
+#include "port.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+class Rand
+{
+public:
+ Rand() : m_arr(NULL) , m_size(0) , m_ind(0) { }
+
+ bool seedFile(CPCCHAR name);
+
+ void seedNum(UINT num);
+
+ int getNum()
+ {
+ if(m_arr)
+ {
+ m_ind++;
+ if(m_ind >= m_size)
+ m_ind = 0;
+ return m_arr[m_ind];
+ }
+ else
+ return rand();
+ }
+
+ int getSize() { return m_size; }
+
+ string getSeed() { return m_name; }
+
+ void reset();
+
+private:
+ int *m_arr;
+ int m_size;
+ int m_ind;
+ string m_name;
+ UINT m_init;
+
+ Rand(const Rand &t);
+ Rand & operator =(const Rand &t);
+};
+
+#endif
diff --git a/readme.html b/readme.html
new file mode 100644
index 0000000..5d62a85
--- /dev/null
+++ b/readme.html
@@ -0,0 +1,178 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<HTML>
+<HEAD><TITLE>Bonnie++ Documentation</TITLE></HEAD>
+<BODY>
+
+<UL><LI><B>Introduction</B><BR>
+This benchmark is named <B>Bonnie++</B>, it is based on the <B>Bonnie</B>
+benchmark written by <A HREF="MAILTO:tbray@textuality.com">Tim Bray</A>. I was
+originally hoping to work with Tim on developing the
+next version of Bonnie, but we could not agree on the issue of whether C++
+should be used in the program. Tim has graciously given me permission to use
+the name "bonnie++" for my program which is based around his benchmark.<BR>
+Bonnie++ adds the facility to test more than 2G of storage on a 32bit
+machine, and tests for file creat(), stat(), unlink() operations.<BR>
+Also it will output in CSV spread-sheet format to standard output. If you use
+the "-q" option for quiet mode then the human-readable version will go to
+stderr so redirecting stdout to a file will get only the csv in the file.
+The program bon_csv2html takes csv format data on stdin and writes a HTML
+file on standard output which has a nice display of all the data. The program
+bon_csv2txt takes csv format data on stdin and writes a formatted plain text
+version on stdout, this was originally written to work with 80 column braille
+displays, but can also work well in email.<BR></LI>
+<LI><B>A note on blocking writes</B><BR>
+I have recently added a <B>-b</B> option to cause a fsync() after every
+write (and a fsync() of the directory after file create or delete). This is
+what you probably want to do if testing performance of mail or database
+servers as they like to sync everything. The default is to allow write-back
+caching in the OS which is what you want if testing performance for copying
+files, compiling, etc.<BR></LI>
+<LI><B>Waiting for semaphores</B><BR>
+There is often a need to test multiple types of IO at the same time. This is
+most important for testing RAID arrays where you will almost never see full
+performance with only one process active. Bonnie++ 2.0 will address this
+issue, but there is also a need for more flexibility than the ability to
+create multiple files in the same directory and fork processes to access
+them (which is what version 2.0 will do). There is also need to perform tests
+such as determining whether access to an NFS server will load the system and
+slow down access to a local hard drive. <A HREF="MAILTO:c.kagerhuber@t-online.net">
+Christian Kagerhuber</A> contributed the initial code to do semaphores so that
+several copies of Bonnie++ can be run in a synchronised fashion. This means
+you can have 8 copies of Bonnie++ doing per-char reads to test out your 8CPU
+system!</LI>
+<LI><B>Summary of tests</B><BR>
+The first 6 tests are from the original Bonnie: Specifically, these are the
+types of filesystem activity that have been observed to be bottlenecks in
+I/O-intensive applications, in particular the text database work done in
+connection with the New Oxford English Dictionary Project at the University
+of Waterloo.<BR>
+It initially performs a series of tests on a file (or files) of known size.
+By default, that size is 200 MiB (but that's not enough - see below). For
+each test, Bonnie reports the number of Kilo-bytes processed per elapsed
+second, and the % CPU usage (sum of user and system). If a size &gt;1G is
+specified then we will use a number of files of size 1G or less. This way
+we can use a 32bit program to test machines with 8G of RAM! NB I have not
+yet tested more than 2100M of file storage. If you test with larger storage
+then this please send me the results.<BR>
+The next 6 tests involve file create/stat/unlink to simulate some operations
+that are common bottlenecks on large Squid and INN servers, and machines with
+tens of thousands of mail files in /var/spool/mail.<BR>
+In each case, an attempt is made to keep optimizers from noticing it's
+all bogus. The idea is to make sure that these are real transfers to/from
+user space to the physical disk.<P></LI>
+<LI><B>Test Details</B><BR>
+<UL><LI>The file IO tests are:
+<OL>
+<LI><B>Sequential Output</B>
+<OL>
+<LI>Per-Character. The file is written using the putc() stdio macro.
+The loop that does the writing should be small enough to fit into any
+reasonable I-cache. The CPU overhead here is that required to do the
+stdio code plus the OS file space allocation.</LI>
+
+<LI>Block. The file is created using write(2). The CPU overhead
+should be just the OS file space allocation.</LI>
+
+<LI>Rewrite. Each BUFSIZ of the file is read with read(2), dirtied, and
+rewritten with write(2), requiring an lseek(2). Since no space
+allocation is done, and the I/O is well-localized, this should test the
+effectiveness of the filesystem cache and the speed of data transfer.</LI>
+</OL>
+</LI>
+
+<LI><B>Sequential Input</B>
+<OL>
+<LI>Per-Character. The file is read using the getc() stdio macro. Once
+again, the inner loop is small. This should exercise only stdio and
+sequential input.</LI>
+
+<LI>Block. The file is read using read(2). This should be a very pure
+test of sequential input performance.</LI>
+</OL>
+</LI>
+
+<LI><B>Random Seeks</B><BR>
+
+This test runs SeekProcCount processes (default 3) in parallel, doing a total of
+8000 lseek()s to locations in the file specified by random() in bsd systems,
+drand48() on sysV systems. In each case, the block is read with read(2).
+In 10% of cases, it is dirtied and written back with write(2).<BR>
+
+The idea behind the SeekProcCount processes is to make sure there's always
+a seek queued up.<BR>
+
+AXIOM: For any unix filesystem, the effective number of lseek(2) calls
+per second declines asymptotically to near 30, once the effect of
+caching is defeated.<BR>
+One thing to note about this is that the number of disks in a RAID set
+increases the number of seeks. For read using RAID-1 (mirroring) will
+double the number of seeks. For write using RAID-0 will multiply the number
+of writes by the number of disks in the RAID-0 set (provided that enough
+seek processes exist).<BR>
+
+The size of the file has a strong nonlinear effect on the results of
+this test. Many Unix systems that have the memory available will make
+aggressive efforts to cache the whole thing, and report random I/O rates
+in the thousands per second, which is ridiculous. As an extreme
+example, an IBM RISC 6000 with 64 MiB of memory reported 3,722 per second
+on a 50 MiB file. Some have argued that bypassing the cache is artificial
+since the cache is just doing what it's designed to. True, but in any
+application that requires rapid random access to file(s) significantly
+larger than main memory which is running on a system which is doing
+significant other work, the caches will inevitably max out. There is
+a hard limit hiding behind the cache which has been observed by the
+author to be of significant import in many situations - what we are trying
+to do here is measure that number.</LI>
+</OL>
+</LI>
+
+<LI>
+The file creation tests use file names with 7 digits numbers and a random
+number (from 0 to 12) of random alpha-numeric characters.
+For the sequential tests the random characters in the file name follow the
+number. For the random tests the random characters are first.<BR>
+The sequential tests involve creating the files in numeric order, then
+stat()ing them in readdir() order (IE the order they are stored in the
+directory which is very likely to be the same order as which they were
+created), and deleting them in the same order.<BR>
+For the random tests we create the files in an order that will appear
+random to the file system (the last 7 characters are in numeric order on
+the files). Then we stat() random files (NB this will return very good
+results on file systems with sorted directories because not every file
+will be stat()ed and the cache will be more effective). After that we
+delete all the files in random order.<BR>
+If a maximum size greater than 0 is specified then when each file is created
+it will have a random amount of data written to it. Then when the file is
+stat()ed its data will be read.
+</LI>
+</UL>
+</LI>
+<LI><B>COPYRIGHT NOTICE</B><BR>
+* Copyright &copy; <A HREF="MAILTO:tbray@textuality.com">Tim Bray
+(tbray@textuality.com)</A>, 1990.<BR>
+* Copyright &copy; <A HREF="MAILTO:russell@coker.com.au">Russell Coker
+(russell@coker.com.au)</A> 1999.<P>
+I have updated the program, added support for &gt;2G on 32bit machines, and
+tests for file creation.<BR>
+Licensed under the GPL version 2.0.
+</LI><LI>
+<B>DISCLAIMER</B><BR>
+This program is provided AS IS with no warranty of any kind, and<BR>
+The author makes no representation with respect to the adequacy of this
+program for any particular purpose or with respect to its adequacy to
+produce any particular result, and<BR>
+The authors shall not be liable for loss or damage arising out of
+the use of this program regardless of how sustained, and
+In no event shall the author be liable for special, direct, indirect
+or consequential damage, loss, costs or fees or expenses of any
+nature or kind.<P>
+
+NB The results of running this program on live server machines can include
+extremely bad performance of server processes, and excessive consumption of
+disk space and/or Inodes which may cause the machine to cease performing its
+designated tasks. Also the benchmark results are likely to be bad.<P>
+Do not run this program on live production machines.
+</LI>
+</UL>
+</BODY>
+</HTML>
diff --git a/semaphore.cpp b/semaphore.cpp
new file mode 100644
index 0000000..ee463e3
--- /dev/null
+++ b/semaphore.cpp
@@ -0,0 +1,129 @@
+#include "port.h"
+#include <stdio.h>
+#include "semaphore.h"
+#include <unistd.h>
+#include <stdlib.h>
+
+Semaphore::Semaphore(int semKey, int numSems, int val)
+ : m_semid(0)
+ , m_semflg(IPC_CREAT | 0666)
+ , m_semopen(false)
+ , m_semKey(semKey)
+ , m_numSems(numSems)
+{
+ m_arg.val = (0);
+ if(val)
+ {
+ if(create(val))
+ exit(1);
+ }
+}
+
+int Semaphore::clear_sem()
+{
+ int semid;
+ // have num-sems set to 1 so that we remove regardless of how many
+ // semaphores were present.
+ if((semid = semget(m_semKey, 1, 0666)) == -1)
+ {
+ perror("Can't get semaphore ID");
+ return 1;
+ }
+ if(semctl(semid, 0, IPC_RMID, m_arg) == -1)
+ {
+ perror("Can't get remove semaphore");
+ return 1;
+ }
+ printf("Semaphore removed.\n");
+ return 0;
+}
+
+int Semaphore::create(int count)
+{
+ if((m_semid = semget(m_semKey, m_numSems, m_semflg)) == -1)
+ {
+ perror("Can't get semaphore");
+ return 1;
+ }
+ m_arg.val = count;
+ int i;
+ for(i = 0; i < m_numSems; ++i)
+ {
+ if(semctl(m_semid, i, SETVAL, m_arg) == -1)
+ {
+ perror("Can't set semaphore value");
+ return 1;
+ }
+ }
+ m_semopen = true;
+ printf("Creating semaphore for %d procs.\n", count);
+ return 0;
+}
+
+int Semaphore::get_semid()
+{
+ int semflg = 0666;
+ if ((m_semid = semget(m_semKey, m_numSems, semflg)) == -1)
+ {
+ perror("Can't get semaphore ID");
+ return 1;
+ }
+ m_semopen = true;
+ return 0;
+}
+
+int Semaphore::decrement_and_wait(int nr_sem)
+{
+ if(!m_semopen)
+ return 0;
+ struct sembuf sops;
+ sops.sem_num = nr_sem;
+ sops.sem_op = -1;
+ sops.sem_flg = IPC_NOWAIT;
+ if(semop(m_semid, &sops, 1) == -1)
+ {
+ perror("semop: semop failed.\n");
+ return 1;
+ }
+ sops.sem_num = nr_sem;
+ sops.sem_op = 0;
+ sops.sem_flg = SEM_UNDO;
+ if(semop(m_semid, &sops, 1) == -1)
+ {
+ perror("semop: semop failed.\n");
+ return 1;
+ }
+ return 0;
+}
+
+int Semaphore::get_mutex()
+{
+ if(!m_semopen)
+ return 0;
+ struct sembuf sops;
+ sops.sem_num = 0;
+ sops.sem_op = -1;
+ sops.sem_flg = SEM_UNDO;
+ if(semop(m_semid, &sops, 1) == -1)
+ {
+ perror("semop: semop failed.\n");
+ return 1;
+ }
+ return 0;
+}
+
+int Semaphore::put_mutex()
+{
+ if(!m_semopen)
+ return 0;
+ struct sembuf sops;
+ sops.sem_num = 0;
+ sops.sem_op = 1;
+ sops.sem_flg = IPC_NOWAIT;
+ if(semop(m_semid, &sops, 1) == -1)
+ {
+ perror("semop: semop failed.\n");
+ return 1;
+ }
+ return 0;
+}
diff --git a/semaphore.h b/semaphore.h
new file mode 100644
index 0000000..5c24543
--- /dev/null
+++ b/semaphore.h
@@ -0,0 +1,51 @@
+#ifndef SEMAPHORE_H
+#define SEMAPHORE_H
+
+#include <sys/ipc.h>
+#include <sys/sem.h>
+
+#ifndef SEMUN_IN_SEM_H
+union semun
+{
+ int val; /* value for SETVAL */
+ struct semid_ds *buf; /* buffer for IPC_STAT, IPC_SET */
+ unsigned short int *array; /* array for GETALL, SETALL */
+ struct seminfo *__buf; /* buffer for IPC_INFO */
+};
+#endif
+
+class Semaphore
+{
+public:
+
+ // numSems is the number of semaphores to be in the set
+ // semKey is the ID number for the semaphore set
+ // val is the initial value for the semaphores, no values will be assigned
+ // if the default (0) is specified.
+ Semaphore(int semKey, int numSems = 1, int val = 0);
+
+ // clear the semaphores and return an error code.
+ int clear_sem();
+
+ // create the semaphores
+ // count is the initial value assigned to each semaphore
+ int create(int count);
+
+ // get the handle to a semaphore set previously created
+ int get_semid();
+
+ int decrement_and_wait(int nr_sem);
+ int get_mutex();
+ int put_mutex();
+
+private:
+ union semun m_arg;
+ int m_semid;
+ int m_semflg;
+ bool m_semopen;
+ int m_semKey;
+ int m_numSems;
+};
+
+#endif
+
diff --git a/sh.common b/sh.common
new file mode 100644
index 0000000..4ac6936
--- /dev/null
+++ b/sh.common
@@ -0,0 +1,154 @@
+##
+## This file is part of shtool and free software; you can redistribute
+## it and/or modify it under the terms of the GNU General Public
+## License as published by the Free Software Foundation; either version
+## 2 of the License, or (at your option) any later version.
+##
+## This file is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+## USA, or contact Ralf S. Engelschall <rse@engelschall.com>.
+##
+
+##
+## COMMON UTILITY CODE
+##
+
+# determine name of tool
+if [ ".$tool" != . ]; then
+ # used inside shtool script
+ toolcmd="$0 $tool"
+ toolcmdhelp="shtool $tool"
+ msgprefix="shtool:$tool"
+else
+ # used as standalone script
+ toolcmd="$0"
+ toolcmdhelp="sh $0"
+ msgprefix="$str_tool"
+fi
+
+# parse argument specification string
+eval `echo $arg_spec |\
+ sed -e 's/^\([0-9]*\)\([+=]\)/arg_NUMS=\1; arg_MODE=\2/'`
+
+# parse option specification string
+eval `echo h.$opt_spec |\
+ sed -e 's/\([a-zA-Z0-9]\)\([.:+]\)/opt_MODE_\1=\2;/g'`
+
+# iterate over argument line
+opt_PREV=''
+while [ $# -gt 0 ]; do
+ # special option stops processing
+ if [ ".$1" = ".--" ]; then
+ shift
+ break
+ fi
+
+ # determine option and argument
+ opt_ARG_OK=no
+ if [ ".$opt_PREV" != . ]; then
+ # merge previous seen option with argument
+ opt_OPT="$opt_PREV"
+ opt_ARG="$1"
+ opt_ARG_OK=yes
+ opt_PREV=''
+ else
+ # split argument into option and argument
+ case "$1" in
+ -[a-zA-Z0-9]*)
+ eval `echo "x$1" |\
+ sed -e 's/^x-\([a-zA-Z0-9]\)/opt_OPT="\1";/' \
+ -e 's/";\(.*\)$/"; opt_ARG="\1"/'`
+ ;;
+ -[a-zA-Z0-9])
+ opt_OPT=`echo "x$1" | cut -c3-`
+ opt_ARG=''
+ ;;
+ *)
+ break
+ ;;
+ esac
+ fi
+
+ # eat up option
+ shift
+
+ # determine whether option needs an argument
+ eval "opt_MODE=\$opt_MODE_${opt_OPT}"
+ if [ ".$opt_ARG" = . -a ".$opt_ARG_OK" != .yes ]; then
+ if [ ".$opt_MODE" = ".:" -o ".$opt_MODE" = ".+" ]; then
+ opt_PREV="$opt_OPT"
+ continue
+ fi
+ fi
+
+ # process option
+ case $opt_MODE in
+ '.' )
+ # boolean option
+ eval "opt_${opt_OPT}=yes"
+ ;;
+ ':' )
+  # option with argument (multiple occurrences override)
+ eval "opt_${opt_OPT}=\"\$opt_ARG\""
+ ;;
+ '+' )
+  # option with argument (multiple occurrences append)
+ eval "opt_${opt_OPT}=\"\$opt_${opt_OPT} \$opt_ARG\""
+ ;;
+ * )
+ echo "$msgprefix:Error: unknown option: \`-$opt_OPT'" 1>&2
+ echo "$msgprefix:Hint: run \`$toolcmdhelp -h' or \`man shtool' for details" 1>&2
+ exit 1
+ ;;
+ esac
+done
+if [ ".$opt_PREV" != . ]; then
+ echo "$msgprefix:Error: missing argument to option \`-$opt_PREV'" 1>&2
+ echo "$msgprefix:Hint: run \`$toolcmdhelp -h' or \`man shtool' for details" 1>&2
+ exit 1
+fi
+
+# process help option
+if [ ".$opt_h" = .yes ]; then
+ echo "Usage: $toolcmdhelp $str_usage"
+ exit 0
+fi
+
+# complain about incorrect number of arguments
+case $arg_MODE in
+ '=' )
+ if [ $# -ne $arg_NUMS ]; then
+ echo "$msgprefix:Error: invalid number of arguments (exactly $arg_NUMS expected)" 1>&2
+ echo "$msgprefix:Hint: run \`$toolcmd -h' or \`man shtool' for details" 1>&2
+ exit 1
+ fi
+ ;;
+ '+' )
+ if [ $# -lt $arg_NUMS ]; then
+ echo "$msgprefix:Error: invalid number of arguments (at least $arg_NUMS expected)" 1>&2
+ echo "$msgprefix:Hint: run \`$toolcmd -h' or \`man shtool' for details" 1>&2
+ exit 1
+ fi
+ ;;
+esac
+
+# establish a temporary file on request
+if [ ".$gen_tmpfile" = .yes ]; then
+ if [ ".$TMPDIR" != . ]; then
+ tmpdir="$TMPDIR"
+ elif [ ".$TEMPDIR" != . ]; then
+ tmpdir="$TEMPDIR"
+ else
+ tmpdir="/tmp"
+ fi
+ tmpfile="$tmpdir/.shtool.$$"
+ rm -f $tmpfile >/dev/null 2>&1
+ touch $tmpfile
+fi
+
diff --git a/sun/Makefile b/sun/Makefile
new file mode 100644
index 0000000..297f83e
--- /dev/null
+++ b/sun/Makefile
@@ -0,0 +1,17 @@
+
+all: pkg
+
+INSTROOT=`pwd`/tmp
+PKGNAME=bonnie++
+
+../Makefile:
+ ( cd .. ; ./configure --prefix=/usr --mandir=`pwd`/sun/tmp/usr/share/man )
+
+pkg: ../Makefile
+ make -C .. WFLAGS="-Wall -W -Wshadow -Wpointer-arith -Wwrite-strings"
+ make -C .. prefix=${INSTROOT}/usr install
+ pkgmk -o -r ${INSTROOT}
+ pkgtrans -s spool ${PKGNAME}.pkg ${PKGNAME}
+ @echo
+ @echo "/var/spool/pkg/${PKGNAME}.pkg complete!"
+ @echo
diff --git a/sun/pkginfo b/sun/pkginfo
new file mode 100644
index 0000000..c1c2adb
--- /dev/null
+++ b/sun/pkginfo
@@ -0,0 +1,9 @@
+PKG=bonnie++
+ARCH=sparc
+VERSION=1.97
+CATEGORY=application
+NAME=Bonnie++
+DESC=Hard drive benchmark suite
+VENDOR=Russell Coker
+EMAIL=russell@coker.com.au
+MAXINST=1
diff --git a/sun/pkginfo.in b/sun/pkginfo.in
new file mode 100644
index 0000000..541ab73
--- /dev/null
+++ b/sun/pkginfo.in
@@ -0,0 +1,9 @@
+PKG=bonnie++
+ARCH=sparc
+VERSION=@version@
+CATEGORY=application
+NAME=Bonnie++
+DESC=Hard drive benchmark suite
+VENDOR=Russell Coker
+EMAIL=russell@coker.com.au
+MAXINST=1
diff --git a/sun/prototype b/sun/prototype
new file mode 100644
index 0000000..49513a8
--- /dev/null
+++ b/sun/prototype
@@ -0,0 +1,16 @@
+i pkginfo
+d none /usr 0775 root sys
+d none /usr/bin 0775 root bin
+f none /usr/bin/bon_csv2html 0755 root root
+f none /usr/bin/bon_csv2txt 0755 root root
+d none /usr/sbin 0775 root bin
+f none /usr/sbin/bonnie++ 0755 root root
+f none /usr/sbin/zcav 0755 root root
+d none /usr/share 0755 root sys
+d none /usr/share/man 0755 bin bin
+d none /usr/share/man/man1 0755 bin bin
+f none /usr/share/man/man1/bon_csv2html.1 0644 root root
+f none /usr/share/man/man1/bon_csv2txt.1 0644 root root
+d none /usr/share/man/man8 0755 bin bin
+f none /usr/share/man/man8/bonnie++.8 0644 root root
+f none /usr/share/man/man8/zcav.8 0644 root root
diff --git a/sun/tmp-conv b/sun/tmp-conv
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/sun/tmp-conv
diff --git a/sync.cpp b/sync.cpp
new file mode 100644
index 0000000..d35b5e8
--- /dev/null
+++ b/sync.cpp
@@ -0,0 +1,34 @@
+#include "port.h"
+#include "semaphore.h"
+#include "sync.h"
+#include <stdio.h>
+
+Sync::Sync(SYNC_TYPE type, int semKey, int num_tests)
+ : Semaphore(semKey, num_tests)
+ , m_type(type)
+{
+}
+
+int Sync::decrement_and_wait(int nr_sem)
+{
+ switch(m_type)
+ {
+ case eSem:
+ return Semaphore::decrement_and_wait(nr_sem);
+ case ePrompt:
+ printf("\n%d:\n", nr_sem);
+ fflush(NULL);
+ char buf[16];
+ fgets(buf, sizeof(buf) - 1, stdin);
+ case eNone:
+ break;
+ }
+ return 0;
+}
+
+int Sync::get_semid()
+{
+ if(m_type == eSem)
+ return Semaphore::get_semid();
+ return 0;
+}
diff --git a/sync.h b/sync.h
new file mode 100644
index 0000000..304fcbc
--- /dev/null
+++ b/sync.h
@@ -0,0 +1,22 @@
+#include "semaphore.h"
+
+typedef enum
+{
+ eSem, ePrompt, eNone
+} SYNC_TYPE;
+
+class Sync : public Semaphore
+{
+public:
+ Sync(SYNC_TYPE type, int semKey = 0, int num_tests = 0);
+
+ int decrement_and_wait(int nr_sem);
+
+ // get the handle to a semaphore set previously created
+ int get_semid();
+
+private:
+ SYNC_TYPE m_type;
+
+};
+
diff --git a/thread.cpp b/thread.cpp
new file mode 100644
index 0000000..815a5fd
--- /dev/null
+++ b/thread.cpp
@@ -0,0 +1,162 @@
+#include <stdlib.h>
+#include "thread.h"
+#include <stdio.h>
+
+#include <unistd.h>
+#include <time.h>
+#include <sys/wait.h>
+#include <pthread.h>
+
+Thread::Thread()
+ : m_read(-1)
+ , m_write(-1)
+ , m_threadNum(-1)
+ , m_thread_info(NULL)
+ , m_parentRead(-1)
+ , m_parentWrite(-1)
+ , m_childRead(-1)
+ , m_childWrite(-1)
+ , m_numThreads(0)
+ , m_retVal(NULL)
+{
+}
+
+Thread::Thread(int threadNum, const Thread *parent)
+ : m_read(parent->m_childRead)
+ , m_write(parent->m_childWrite)
+ , m_threadNum(threadNum)
+ , m_thread_info(NULL)
+ , m_parentRead(-1)
+ , m_parentWrite(-1)
+ , m_childRead(-1)
+ , m_childWrite(-1)
+ , m_numThreads(parent->m_numThreads)
+ , m_retVal(&parent->m_retVal[threadNum])
+{
+}
+
+Thread::~Thread()
+{
+  if(m_threadNum == -1) // only the parent object owns the thread table and pipes
+  {
+    for(int i = 0; i < m_numThreads; i++)
+    {
+      pthread_join(m_thread_info[i], NULL); // reap every worker before freeing state
+    }
+    delete[] m_thread_info; // allocated with new[] in go(); scalar delete was UB
+    close(m_parentRead);
+    close(m_parentWrite);
+    close(m_childRead);
+    close(m_childWrite);
+    delete[] m_retVal; // also a new[] array (go() allocates num + 1 ints)
+  }
+}
+
+// for the benefit of this function and the new Thread class it may create
+// the Thread class must do nothing of note in its constructor or its
+// go() member function.
+PVOID thread_func(PVOID param)
+{
+ THREAD_DATA *td = (THREAD_DATA *)param;
+ Thread *thread = td->f->newThread(td->threadNum);
+ thread->setRetVal(thread->action(td->param));
+ delete thread;
+ delete td;
+ return NULL;
+}
+
+void Thread::go(PVOID param, int num)
+{
+ m_numThreads += num;
+ FILE_TYPE control[2];
+ FILE_TYPE feedback[2];
+ if (pipe(feedback) || pipe(control))
+ {
+ fprintf(stderr, "Can't open pipes.\n");
+ exit(1);
+ }
+ m_parentRead = feedback[0];
+ m_parentWrite = control[1];
+ m_childRead = control[0];
+ m_childWrite = feedback[1];
+ m_read = m_parentRead;
+ m_write = m_parentWrite;
+ m_readPoll.events = POLLIN | POLLERR | POLLHUP | POLLNVAL;
+ m_writePoll.events = POLLOUT | POLLERR | POLLHUP | POLLNVAL;
+ m_readPoll.fd = m_parentRead;
+ m_writePoll.fd = m_parentWrite;
+ pthread_attr_t attr;
+ if(pthread_attr_init(&attr))
+ fprintf(stderr, "Can't init thread attributes.\n");
+ if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE))
+ fprintf(stderr, "Can't set thread attributes.\n");
+ m_thread_info = new pthread_t[num];
+
+ m_retVal = new int[num + 1];
+ for(int i = 1; i <= num; i++)
+ {
+ m_retVal[i] = -1;
+ THREAD_DATA *td = new THREAD_DATA;
+ td->f = this;
+ td->param = param;
+ td->threadNum = i;
+ int p = pthread_create(&m_thread_info[i - 1], &attr, thread_func, PVOID(td));
+ if(p)
+ {
+ fprintf(stderr, "Can't create a thread.\n");
+ exit(1);
+ }
+ }
+ if(pthread_attr_destroy(&attr))
+ fprintf(stderr, "Can't destroy thread attributes.\n");
+ m_readPoll.fd = m_read;
+ m_writePoll.fd = m_write;
+}
+
+void Thread::setRetVal(int rc)
+{
+ *m_retVal = rc;
+}
+
+int Thread::Read(PVOID buf, int size, int timeout)
+{
+ if(timeout)
+ {
+ int rc = poll(&m_readPoll, 1, timeout * 1000);
+ if(rc < 0)
+ {
+ fprintf(stderr, "Can't poll read ITC.\n");
+ return -1;
+ }
+ if(!rc)
+ return 0;
+ }
+ if(size != read(m_read, buf, size) )
+ {
+ fprintf(stderr, "Can't read data from ITC pipe.\n");
+ return -1;
+ }
+ return size;
+}
+
+int Thread::Write(PVOID buf, int size, int timeout)
+{
+ if(timeout)
+ {
+ int rc = poll(&m_writePoll, 1, timeout * 1000);
+ if(rc < 0)
+ {
+ fprintf(stderr, "Can't poll write ITC.\n");
+ return -1;
+ }
+ if(!rc)
+ return 0;
+ }
+ if(size != write(m_write, buf, size))
+ {
+ fprintf(stderr, "Can't write data to ITC pipe.\n");
+ return -1;
+ }
+ return size;
+}
+
diff --git a/thread.h b/thread.h
new file mode 100644
index 0000000..1e7dbec
--- /dev/null
+++ b/thread.h
@@ -0,0 +1,76 @@
+#ifndef THREAD_H
+#define THREAD_H
+
+#include "port.h"
+
+#include <poll.h>
+#include <pthread.h>
+
+class Thread;
+
+typedef void *PVOID;
+
+typedef struct
+{
+ Thread *f;
+ PVOID param;
+ int threadNum;
+} THREAD_DATA;
+
+class Thread
+{
+protected:
+ // Virtual function that is called when the thread is started.
+ // The parameter is the pointer that is passed first to the go() function
+ virtual int action(PVOID param) = 0;
+
+ // constructor for main thread class
+ Thread();
+
+ // constructor for children.
+ Thread(int threadNum, const Thread *parent);
+ virtual ~Thread();
+
+ void go(PVOID param, int num); // creates all threads
+
+ int getNumThreads() const { return m_numThreads; }
+
+ // Virtual function to construct a new class.
+ // the following comment has the implementation
+ // return new class(threadNum, this);
+ virtual Thread *newThread(int threadNum) = 0;
+
+ // set the return value of the thread, probably not needed
+ void setRetVal(int rc);
+
+protected:
+ int getThreadNum() const { return m_threadNum; }
+ int Read(PVOID buf, int size, int timeout = 60);
+ int Write(PVOID buf, int size, int timeout = 60);
+
+protected:
+ FILE_TYPE m_read;
+ FILE_TYPE m_write;
+private:
+
+ int m_threadNum;
+
+ pollfd m_readPoll;
+ pollfd m_writePoll;
+ pthread_t *m_thread_info;
+ FILE_TYPE m_parentRead;
+ FILE_TYPE m_parentWrite;
+ FILE_TYPE m_childRead;
+ FILE_TYPE m_childWrite;
+ int m_numThreads;
+ int *m_retVal;
+
+ Thread(const Thread &f);
+ Thread & operator =(const Thread &f);
+
+
+friend PVOID thread_func(PVOID param);
+};
+
+#endif
+
diff --git a/util.cpp b/util.cpp
new file mode 100644
index 0000000..9bc27bc
--- /dev/null
+++ b/util.cpp
@@ -0,0 +1,24 @@
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "bonnie.h"
+
+unsigned int size_from_str(CPCCHAR str, CPCCHAR conv)
+{
+  const unsigned int mult[3] = { 1<<10 , 1<<20, 1<<30 }; // K, M, G multipliers
+  unsigned int size = atoi(str); // numeric prefix; optional unit suffix handled below
+  char c = str[0] ? tolower(str[strlen(str) - 1]) : '\0'; // guard: "" would read str[-1]
+  if(conv)
+  {
+    for(int i = 0; i < 3 && conv[i] != '\0'; i++) // bounds check before dereference
+    {
+      if(c == conv[i]) // suffix position in conv selects the multiplier
+      {
+        size *= mult[i];
+        return size;
+      }
+    }
+  }
+  return size; // no (recognised) suffix: value is taken as-is
+}
diff --git a/zcav.8 b/zcav.8
new file mode 100644
index 0000000..0748640
--- /dev/null
+++ b/zcav.8
@@ -0,0 +1,105 @@
+.TH zcav 8
+.SH "NAME"
+zcav \- program to test raw hard drive throughput.
+
+.SH "SYNOPSIS"
+.B zcav
+.I [\-b block\-size[:chunk\-size]] [\-c count] [\-r [first\-block]:last\-block]
+.I [\-w] [\-s skip\-rate] [\-u uid\-to\-use:gid\-to\-use] [\-g gid\-to\-use]
+.I [\-l log\-file] [\-f] file\-name
+.I [\-l log\-file [\-f] file\-name]...
+
+.SH "DESCRIPTION"
+This manual page briefly documents the
+.BR zcav
+program.
+.P
+Modern hard drives have a constant rotational speed but have varying numbers
+of sectors per track (outside tracks are longer and have more sectors). This
+is referred to as Zoned Constant Angular Velocity (or ZCAV). The outer tracks
+will have a higher data transfer rate due to having more sectors per track,
+these tracks generally have the lower track/sector numbers.
+.P
+This program tests the ZCAV performance of a hard drive, by reading the entire
+data on it a specified number of times. The file name is given as the first
+parameter and it can be specified as
+.BR \- ,
+for standard input. This file will be opened as read\-only and in usual
+operation it will be
+.BR /dev/hdX
+or
+.BR /dev/ide/host0/busX/targetY/lun0/disc
+depending on whether you use devfs or not (NB operating systems other than
+Linux will have different device names).
+.P
+The output should be able to be easily graphed with
+.BR gnuplot
+which is what I use to view the results.
+
+.SH "OPTIONS"
+
+.TP
+.B \-b
+the size (in Meg) of the blocks to read/write (default 512M), optionally
+followed by a ':' and the chunk size for read/write operations (default 1M).
+Note that the chunk size must be less than or equal to the block size and
+must also be significantly less than the size of the RAM in the machine.
+Also note that for the write test there will be a fsync() after writing every
+chunk.
+
+.TP
+.B \-c
+the number of times to read/write the entire disk.
+
+.TP
+.B \-r
+the range of data (in Meg) to read/write on each pass (default the entire
+device). Useful if you want to quickly test part of a large drive. If a
+single number is given then that is the last block to read, if two numbers
+then it's the start and end of a range. Values are in megs, but they are
+rounded down to the block size.
+
+.TP
+.B \-s
+skip rate. The option \-s 10 will cause it to read every 10th block and skip
+the rest. Accepts values from 2 to 20.
+
+.TP
+.B \-f
+the file\-name for the input data. This isn't needed on well configured
+systems that have a recent Glibc where you can specify the file name without
+the \-f flag.
+
+.TP
+.B \-u
+user\-id to use. When running as root specify the UID to run the tests as, it
+is not recommended to use root, so if you want to run as root use
+.B \-u root.
+Also if you want to specify the group to run as then use the
+.B user:group
+format. If you specify a user by name but no group then the primary group of
+that user will be chosen. If you specify a user by number and no group then
+the group will be
+.B nogroup.
+
+.TP
+.B \-g
+group\-id to use. Same as using
+.B :group
+for the
+.B \-u
+parameter, just a different way to specify it for compatibility with other
+programs.
+
+.TP
+.B \-w
+write zero blocks to the disk instead of reading from the disk - will destroy data!
+
+.SH "AUTHOR"
+This program, its manual page, and the Debian package were written by
+Russell Coker <russell@coker.com.au>.
+
+.SH "AVAILABILITY"
+The source is available from http://www.coker.com.au/bonnie++ .
+.P
+See http://etbe.coker.com.au/category/benchmark for further information.
diff --git a/zcav.cpp b/zcav.cpp
new file mode 100644
index 0000000..4e29286
--- /dev/null
+++ b/zcav.cpp
@@ -0,0 +1,287 @@
+#include "port.h"
+#include <unistd.h>
+
+#include "zcav_io.h"
+#include "thread.h"
+
+#include <cstdlib>
+#include <cstring>
+
+#define TOO_MANY_LOOPS 100
+
+void usage()
+{
+ fprintf(stderr
+ , "Usage: zcav [-b block-size[:chunk-size]] [-c count]\n"
+ " [-r [start offset:]end offset] [-w]\n"
+ " [-u uid-to-use:gid-to-use] [-g gid-to-use]\n"
+#ifdef _LARGEFILE64_SOURCE
+ " [-s skip rate]\n"
+#endif
+ " [-l log-file] [-f] file-name\n"
+ " [-l log-file [-f] file-name]...\n"
+ "\n"
+ "File name of \"-\" means standard input\n"
+ "Count is the number of times to read the data (default 1).\n"
+ "Max size is the amount of data to read from each device.\n"
+ "\n"
+ "Version: " BON_VERSION "\n");
+ exit(1);
+}
+
+class MultiZcav : public Thread
+{
+public:
+ MultiZcav();
+ MultiZcav(int threadNum, const MultiZcav *parent);
+ virtual ~MultiZcav();
+
+ virtual int action(PVOID param);
+
+ int runit();
+
+ void setFileLogNames(const char *file, const char *log)
+ {
+ m_fileNames.push_back(file);
+ m_logNames.push_back(log);
+ m_readers->push_back(new ZcavRead);
+ }
+
+ void setSizes(int block_size, int chunk_size)
+ {
+ m_block_size = block_size;
+ m_chunk_size = chunk_size;
+ if(m_block_size < 1 || m_chunk_size < 1 || m_chunk_size > m_block_size)
+ usage();
+ }
+
+ void setWrite(int do_write)
+ {
+ m_do_write = do_write;
+ }
+
+ void setLoops(int max_loops)
+ {
+ m_max_loops = max_loops;
+ if(max_loops < 1 || max_loops > TOO_MANY_LOOPS)
+ usage();
+ }
+
+ void setMaxSize(int max_size)
+ {
+ m_max_size = max_size;
+ if(max_size < 1)
+ usage();
+ }
+
+ void setStartOffset(int start_offset)
+ {
+ m_start_offset = start_offset;
+ if(start_offset < 1)
+ usage();
+ }
+
+ void setSkipRate(int skip_rate)
+ {
+ m_skip_rate = skip_rate;
+ if(skip_rate < 2 || skip_rate > 20)
+ usage();
+ }
+
+private:
+ virtual Thread *newThread(int threadNum)
+ { return new MultiZcav(threadNum, this); }
+
+ vector<const char *> m_fileNames, m_logNames;
+ vector<ZcavRead *> *m_readers;
+
+ int m_block_size, m_max_loops, m_max_size, m_start_offset, m_skip_rate;
+ int m_chunk_size, m_do_write;
+
+ MultiZcav(const MultiZcav &m);
+ MultiZcav & operator =(const MultiZcav &m);
+};
+
+MultiZcav::MultiZcav()
+{
+ m_block_size = DEFAULT_BLOCK_SIZE;
+ m_max_loops = 1;
+ m_max_size = 0;
+ m_start_offset = 0;
+ m_skip_rate = 1;
+ m_chunk_size = DEFAULT_CHUNK_SIZE;
+ m_do_write = 0;
+ m_readers = new vector<ZcavRead *>;
+}
+
+MultiZcav::MultiZcav(int threadNum, const MultiZcav *parent)
+  : Thread(threadNum, parent)
+  , m_readers(parent->m_readers) // shared with parent; only thread 0 deletes it
+  , m_block_size(parent->m_block_size)
+  , m_max_loops(parent->m_max_loops)
+  , m_max_size(parent->m_max_size)
+  , m_start_offset(parent->m_start_offset)
+  , m_skip_rate(parent->m_skip_rate), m_chunk_size(parent->m_chunk_size), m_do_write(parent->m_do_write) // last two were left uninitialized
+{
+}
+
+int MultiZcav::action(PVOID)
+{
+ ZcavRead *zc = (*m_readers)[getThreadNum() - 1];
+ int rc = zc->Read(m_max_loops, m_max_size / m_block_size, m_write, m_skip_rate, m_start_offset / m_block_size);
+ zc->Close();
+ return rc;
+}
+
+MultiZcav::~MultiZcav()
+{
+ if(getThreadNum() < 1)
+ {
+ while(m_readers->size())
+ {
+ delete m_readers->back();
+ m_readers->pop_back();
+ }
+ delete m_readers;
+ }
+}
+
+int MultiZcav::runit()
+{
+ unsigned int i;
+ unsigned int num_threads = m_fileNames.size();
+ if(num_threads < 1)
+ usage();
+ for(i = 0; i < num_threads; i++)
+ {
+ if((*m_readers)[i]->Open(NULL, m_block_size, m_fileNames[i], m_logNames[i], m_chunk_size, m_do_write))
+ {
+ return 1;
+ }
+ }
+ go(NULL, num_threads);
+ int res = 0;
+ while(num_threads)
+ {
+ char c = 0;
+ if(Read(&c, 1, 0) != 1)
+ printf("can't read!\n");
+ num_threads--;
+ if(c > res)
+ res = c;
+ }
+ return res;
+}
+
+int main(int argc, char *argv[])
+{
+ MultiZcav mz;
+
+ if(argc < 2)
+ usage();
+
+ char *userName = NULL, *groupName = NULL;
+ int c;
+ int do_write = 0;
+ const char *log = "-";
+ const char *file = "";
+ while(-1 != (c = getopt(argc, argv, "-c:b:f:l:r:w"
+#ifdef _LARGEFILE64_SOURCE
+ "s:"
+#endif
+ "u:g:")) )
+ {
+ switch(char(c))
+ {
+ case 'b':
+ {
+ int block_size, chunk_size;
+ int rc = sscanf(optarg, "%d:%d", &block_size, &chunk_size);
+ if(rc == 1)
+ chunk_size = DEFAULT_CHUNK_SIZE;
+ else if(rc != 2)
+ usage();
+ mz.setSizes(block_size, chunk_size);
+ }
+ break;
+ case 'c':
+ mz.setLoops(atoi(optarg));
+ break;
+ case 'l':
+ log = optarg;
+ break;
+ case 'r':
+ {
+ int a, b, rc;
+ rc = sscanf(optarg, "%d:%d", &a, &b);
+ if(rc == 0)
+ usage();
+ if(rc == 1)
+ mz.setMaxSize(a);
+ else
+ {
+ mz.setStartOffset(a);
+ mz.setMaxSize(b);
+ }
+ }
+ break;
+#ifdef _LARGEFILE64_SOURCE
+ case 's':
+ mz.setSkipRate(atoi(optarg));
+ break;
+#endif
+ case 'g':
+ if(groupName)
+ usage();
+ groupName = optarg;
+ break;
+ case 'u':
+ {
+ if(userName)
+ usage();
+ userName = strdup(optarg);
+ int i;
+ for(i = 0; userName[i] && userName[i] != ':'; i++) {}
+
+ if(userName[i] == ':')
+ {
+ if(groupName)
+ usage();
+ userName[i] = '\0';
+ groupName = &userName[i + 1];
+ }
+ }
+ break;
+ case 'w':
+ mz.setWrite(1);
+ do_write = 1;
+ break;
+ case 'f':
+ case char(1):
+ mz.setFileLogNames(optarg, log);
+ file = optarg;
+ log = "-";
+ break;
+ default:
+ usage();
+ }
+ }
+
+ if(userName || groupName)
+ {
+ if(bon_setugid(userName, groupName, false))
+ return 1;
+ if(userName)
+ free(userName);
+ }
+
+ if(do_write)
+ {
+ fprintf(stderr, "Warning, writing to %s in 5 seconds.\n", file);
+ sleep(5);
+ }
+ int rc = mz.runit();
+ sleep(2); // time for all threads to complete
+ return rc;
+}
+
diff --git a/zcav_io.cpp b/zcav_io.cpp
new file mode 100644
index 0000000..5a5fffc
--- /dev/null
+++ b/zcav_io.cpp
@@ -0,0 +1,248 @@
+#include "zcav_io.h"
+
+#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <string.h>
+
+ZcavRead::~ZcavRead()
+{
+  free(m_name); // m_name comes from strdup() in Open(): free(), not delete (NB m_buf from calloc() is never released)
+}
+
+int ZcavRead::Open(bool *finished, int block_size, const char *file
+ , const char *log, int chunk_size, int do_write)
+{
+ m_name = strdup(file);
+ m_finished = finished;
+ m_block_size = block_size;
+ m_chunk_size = chunk_size;
+ m_do_write = do_write;
+ m_buf = calloc(chunk_size * MEG, 1);
+
+ if(strcmp(file, "-"))
+ {
+ if(m_do_write)
+ m_fd = file_open(file, O_WRONLY);
+ else
+ m_fd = file_open(file, O_RDONLY);
+ if(m_fd == -1)
+ {
+ fprintf(stderr, "Can't open %s\n", file);
+ return 1;
+ }
+ }
+ else
+ {
+ m_fd = 0;
+ }
+ if(strcmp(log, "-"))
+ {
+ m_logFile = true;
+ m_log = fopen(log, "w");
+ if(m_log == NULL)
+ {
+ fprintf(stderr, "Can't open %s\n", log);
+ close(m_fd);
+ return 1;
+ }
+ }
+ else
+ {
+ m_logFile = false;
+ m_log = stdout;
+ }
+ return 0;
+}
+
+void ZcavRead::Close()
+{
+ if(m_logFile)
+ fclose(m_log);
+ if(m_fd != 0)
+ ::close(m_fd);
+}
+
+int ZcavRead::writeStatus(int fd, char c)
+{
+ if(write(fd, &c, 1) != 1)
+ {
+ fprintf(stderr, "Write channel broken\n");
+ return 1;
+ }
+ return 0;
+}
+
+int ZcavRead::Read(int max_loops, int max_size, int writeCom, int skip_rate, int start_offset)
+{
+ bool exiting = false;
+ if(max_loops == 1)
+ fprintf(m_log, "#block offset (GiB), MiB/s, time\n");
+ for(int loops = 0; !exiting && loops < max_loops; loops++)
+ {
+ int i = 0;
+#ifdef _LARGEFILE64_SOURCE
+ if(start_offset)
+ {
+ OFF_TYPE real_offset = OFF_TYPE(start_offset) * OFF_TYPE(m_block_size) * OFF_TYPE(1<<20);
+ if(file_lseek(m_fd, real_offset, SEEK_CUR) == OFF_TYPE(-1))
+ {
+ fprintf(stderr, "Can't lseek().\n");
+ writeStatus(writeCom, eSEEK);
+ return 1;
+ }
+ i = start_offset;
+ }
+ else
+#endif
+ if(lseek(m_fd, 0, SEEK_SET))
+ {
+ fprintf(stderr, "Can't lseek().\n");
+ writeStatus(writeCom, eSEEK);
+ return 1;
+ }
+
+ // i is block index
+ double total_read_time = 0.0;
+ bool nextLoop = false;
+ for( ; !nextLoop && (!max_size || i < max_size)
+ && (loops == 0 || (m_times[i] && m_times[i][0] != -1.0))
+ && (!max_size || i < max_size); i++)
+ {
+ double read_time = access_data(i ? skip_rate - 1 : 0);
+ if(read_time < 0.0)
+ {
+ if(i == 0)
+ {
+ fprintf(stderr, "Data file/device \"%s\" too small.\n", m_name);
+ writeStatus(writeCom, eSIZE);
+ return 1;
+ }
+ nextLoop = true;
+ break;
+ }
+ total_read_time += read_time;
+ if(max_loops == 1)
+ {
+ printavg(i * skip_rate, read_time, m_block_size);
+ }
+ else
+ {
+ if(loops == 0)
+ {
+ m_times.push_back(new double[max_loops]);
+ m_count.push_back(0);
+ }
+ m_times[i][loops] = read_time;
+ m_count[i]++;
+ }
+ } // end loop for reading blocks
+
+ time_t now = time(NULL);
+ struct tm *cur_time = localtime(&now);
+ fprintf(stderr, "# Finished loop %d, on device %s at %d:%02d:%02d\n"
+ , loops + 1, m_name, cur_time->tm_hour, cur_time->tm_min
+ , cur_time->tm_sec);
+ fprintf(m_log, "# Read %d megs in %d seconds, %d megabytes per second.\n"
+ , i * m_block_size, int(total_read_time)
+ , int(double(i * m_block_size) / total_read_time));
+
+ if(exiting)
+ return 1;
+ } // end loop for multiple disk reads
+ if(max_loops > 1)
+ {
+ fprintf(m_log, "#loops: %d\n", max_loops);
+ fprintf(m_log, "#block offset (GiB), MiB/s, time\n");
+ for(int i = 0; m_times[i]; i++)
+ printavg(i * skip_rate, average(m_times[i], m_count[i]), m_block_size);
+ }
+ writeStatus(writeCom, eEND);
+ return 0;
+}
+
+void ZcavRead::printavg(int position, double avg, int block_size)
+{
+ if(avg < 1.0)
+ fprintf(m_log, "#%.2f ++++ %.3f\n", float(position) * float(block_size) / 1024.0, avg);
+ else
+ fprintf(m_log, "%.2f %.2f %.3f\n", float(position) * float(block_size) / 1024.0, double(block_size) / avg, avg);
+}
+
+int compar(const void *a, const void *b)
+{
+ double *c = (double *)(a);
+ double *d = (double *)(b);
+ if(*c < *d) return -1;
+ if(*c > *d) return 1;
+ return 0;
+}
+
+// Returns the mean of the values in the array. If the array contains
+// more than 2 items then discard the highest and lowest thirds of the
+// results before calculating the mean.
+double average(double *array, int count)
+{
+ qsort(array, count, sizeof(double), compar);
+ int skip = count / 3;
+ int arr_items = count - (skip * 2);
+ double total = 0.0;
+ for(int i = skip; i < (count - skip); i++)
+ {
+ total += double(array[i]);
+ }
+ return total / double(arr_items);
+}
+
+// just like read() or write() but will not return a partial result and the
+// size is expressed in MEG.
+ssize_t ZcavRead::access_all(int count)
+{
+ ssize_t total = 0;
+ count *= MEG;
+ while(total != static_cast<ssize_t>(count) )
+ {
+ ssize_t rc;
+ // for both read and write just pass the base address of the buffer
+ // as we don't care for the data, if we ever do checksums we have to
+ // change this
+ if(m_do_write)
+ rc = write(m_fd, m_buf, count - total);
+ else
+ rc = read(m_fd, m_buf, count - total);
+ if(rc == -1 || rc == 0)
+ return -1;
+ total += rc;
+ }
+ if(m_do_write && fsync(m_fd))
+ return -1;
+ return total / MEG;
+}
+
+// Read/write a block of data
+double ZcavRead::access_data(int skip)
+{
+#ifdef _LARGEFILE64_SOURCE
+ if(skip)
+ {
+ OFF_TYPE real_offset = OFF_TYPE(skip) * OFF_TYPE(m_block_size) * OFF_TYPE(1<<20);
+ if(file_lseek(m_fd, real_offset, SEEK_CUR) == OFF_TYPE(-1))
+ return -1.0;
+ }
+#endif
+
+ m_dur.start();
+ for(int i = 0; i < m_block_size; i+= m_chunk_size)
+ {
+ int access_size = m_chunk_size;
+ if(i + m_chunk_size > m_block_size)
+ access_size = m_block_size - i;
+ int rc = access_all(access_size);
+ if(rc != access_size)
+ return -1.0;
+ }
+ return m_dur.stop();
+}
+
diff --git a/zcav_io.h b/zcav_io.h
new file mode 100644
index 0000000..89dc292
--- /dev/null
+++ b/zcav_io.h
@@ -0,0 +1,66 @@
+#ifndef ZCAV_IO_H
+#define ZCAV_IO_H
+
+#include "bonnie.h"
+#include <vector>
+
+#include "duration.h"
+using namespace std;
+
+enum results
+{
+ eEND = 0,
+ eSEEK = 1,
+ eSIZE = 2
+};
+
+// Returns the mean of the values in the array. If the array contains
+// more than 2 items then discard the highest and lowest thirds of the
+// results before calculating the mean.
+double average(double *array, int count);
+
+const int MEG = 1024*1024;
+const int DEFAULT_BLOCK_SIZE = 512;
+const int DEFAULT_CHUNK_SIZE = 1;
+
+class ZcavRead
+{
+public:
+ ZcavRead(){ m_name = NULL; }
+ ~ZcavRead();
+
+ int Open(bool *finished, int block_size, const char *file, const char *log
+ , int chunk_size, int do_write);
+ void Close();
+ int Read(int max_loops, int max_size, int writeCom, int skip_rate, int start_offset);
+
+private:
+ ssize_t access_all(int count);
+
+ // write the status to the parent thread
+ int writeStatus(int fd, char c);
+
+ // Read the m_block_count megabytes of data from the fd and return the
+ // amount of time elapsed in seconds.
+ double access_data(int skip);
+ void printavg(int position, double avg, int block_size);
+
+ bool *m_finished;
+ vector <double *> m_times;
+ vector<int> m_count; // number of times each block has been read
+ void *m_buf;
+ int m_fd;
+ FILE *m_log;
+ bool m_logFile;
+ int m_block_size;
+ char *m_name;
+ int m_chunk_size;
+ int m_do_write;
+ Duration m_dur;
+
+ ZcavRead(const ZcavRead &t);
+ ZcavRead & operator =(const ZcavRead &t);
+};
+
+#endif
+