A few more cleanups to avoid using temporary files for stdout output and for testing during decompression.

Con Kolivas 2010-11-01 12:50:20 +11:00
parent 3a22eb09b3
commit cb27097cb4
2 changed files with 19 additions and 24 deletions
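In rough terms, the change picks the decompression output descriptor up front (/dev/null when only testing, the stdout descriptor when writing to stdout, a real output file otherwise) instead of staging everything through a temporary file. The sketch below only illustrates that pattern; it is not lrzip code, and the pick_output_fd() helper and its flag arguments are hypothetical stand-ins for the TEST_ONLY/STDOUT handling in the diff.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical helper: choose where decompressed data goes without creating
 * a temporary file first.  Mirrors the pattern in the diff below:
 * /dev/null when only testing, fd 1 for stdout, a real file otherwise. */
static int pick_output_fd(int test_only, int to_stdout, const char *outfile)
{
	if (test_only)
		return open("/dev/null", O_WRONLY);
	if (to_stdout)
		return STDOUT_FILENO;
	return open(outfile, O_WRONLY | O_CREAT | O_EXCL, 0666);
}

int main(void)
{
	int fd_out = pick_output_fd(0, 1, "out.bin");

	if (fd_out == -1) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* Decompressed data would be written straight to fd_out here. */
	if (write(fd_out, "data\n", 5) != 5)
		perror("write");
	if (fd_out != STDOUT_FILENO)
		close(fd_out);
	return 0;
}

With the descriptor chosen this way, the test-only path no longer leaves a temporary file behind that has to be unlinked afterwards, which is what the close/unlink hunk further down removes.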

main.c

@@ -269,8 +269,7 @@ static void decompress_file(void)
}
if (STDIN) {
fd_in = open_tmpinfile();
read_tmpinfile(fd_in);
fd_in = 0;
} else {
fd_in = open(infilecopy, O_RDONLY);
if (fd_in == -1) {
@@ -290,12 +289,16 @@ static void decompress_file(void)
if (!NO_SET_PERMS)
preserve_perms(fd_in, fd_out);
} else
fd_out = open_tmpoutfile();
fd_hist = open(control.outfile, O_RDONLY);
if (fd_hist == -1)
fatal("Failed to open history file %s\n", control.outfile);
fd_hist = open(control.outfile, O_RDONLY);
if (fd_hist == -1)
fatal("Failed to open history file %s\n", control.outfile);
} else if (TEST_ONLY) {
fd_out = open("/dev/null", O_WRONLY);
fd_hist = open("/dev/zero", O_RDONLY);
} else if (STDOUT) {
fd_out = 1;
fd_hist = 1;
}
read_magic(fd_in, &expected_size);
print_progress("Decompressing...");
@@ -308,18 +311,15 @@ static void decompress_file(void)
print_output("Output filename is: %s: ", control.outfile);
print_progress("[OK] - %lld bytes \n", expected_size);
if (close(fd_hist) != 0 || close(fd_out) != 0)
fatal("Failed to close files\n");
if (TEST_ONLY | STDOUT) {
/* Delete temporary files generated for testing or faking stdout */
if (unlink(control.outfile) != 0)
fatal("Failed to unlink tmpfile: %s\n", strerror(errno));
if (!STDOUT) {
if (close(fd_hist) != 0 || close(fd_out) != 0)
fatal("Failed to close files\n");
}
close(fd_in);
if (!STDIN)
close(fd_in);
if (!(KEEP_FILES | TEST_ONLY) || STDIN) {
if (!(KEEP_FILES | TEST_ONLY)) {
if (unlink(control.infile) != 0)
fatal("Failed to unlink %s: %s\n", infilecopy, strerror(errno));
}

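The hunk below simplifies write_1g(), the helper whose comment explains writing in 1GB chunks to sidestep 32-bit overflow at the >= 2GB mark. As a rough orientation, here is a minimal self-contained sketch of that chunked-write loop using only plain POSIX write(); the name write_1g_sketch and its scaffolding are illustrative rather than the file's actual code.

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

typedef int64_t i64;

static const i64 one_g = 1000 * 1024 * 1024;

/* Sketch of the chunked-write idea: issue write() in at most ~1GB pieces so
 * a single call never has to handle a >= 2GB length on 32-bit systems. */
static ssize_t write_1g_sketch(int fd, void *buf, i64 len)
{
	unsigned char *offset_buf = buf;
	i64 total = 0;
	ssize_t ret;

	while (len > 0) {
		i64 chunk = len > one_g ? one_g : len;

		ret = write(fd, offset_buf, (size_t)chunk);
		if (ret < 0)
			return ret;	/* propagate the error to the caller */
		len -= ret;
		offset_buf += ret;	/* advance by what was actually written */
		total += ret;
	}
	return (ssize_t)total;
}

Looping on the return value also copes with short writes, which is why the offset pointer advances by ret rather than by the requested chunk size.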

@@ -469,25 +469,20 @@ const i64 one_g = 1000 * 1024 * 1024;
/* This is a custom version of write() which writes in 1GB chunks to avoid
the overflows at the >= 2GB mark thanks to 32bit fuckage. This should help
even on the rare occasion write() fails to write 1GB as well. We can write
a null file if we're just testing. When decompressing to stdout we can
write directly to it since there will be no need to seek backwards. */
even on the rare occasion write() fails to write 1GB as well. */
ssize_t write_1g(int fd, void *buf, i64 len)
{
uchar *offset_buf = buf;
i64 total, offset;
ssize_t ret;
if (DECOMPRESS && STDOUT)
fd = 1;
total = offset = 0;
while (len > 0) {
if (len > one_g)
ret = one_g;
else
ret = len;
if (!TEST_ONLY)
ret = write(fd, offset_buf, (size_t)ret);
ret = write(fd, offset_buf, (size_t)ret);
if (ret < 0)
return ret;
len -= ret;