/*
   Copyright (C) 2006-2011 Con Kolivas
   Copyright (C) 2011 Peter Hyman
   Copyright (C) 1998 Andrew Tridgell

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

/* multiplex N streams into a file - the streams are passed
   through different compressors */
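
/* Layout of each stream block header as implied by the read/write helpers
 * below (see write_val/read_val, decrypt_header and open_stream_in): a one
 * byte c_type (CTYPE_NONE/LZO/BZIP2/GZIP/LZMA/ZPAQ), then c_len, u_len and
 * last_head stored little endian in chunk_bytes bytes each (8 bytes in
 * pre-0.6 archives and when encrypted). last_head chains to the following
 * block header of the stream. When encrypted, the header is a fixed 25 bytes
 * preceded by SALT_LEN bytes of salt. */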

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif
#include <sys/statvfs.h>
#include <pthread.h>
#include <bzlib.h>
#include <zlib.h>
#include <lzo/lzoconf.h>
#include <lzo/lzo1x.h>
#ifdef HAVE_ERRNO_H
# include <errno.h>
#endif
#include <endian.h>

/* LZMA C Wrapper */
#include "lzma/C/LzmaLib.h"

#include "util.h"
#include "zpipe.h"
#include "liblrzip.h"
#include "lrzip.h"

#ifdef __APPLE__
# define fmemopen fake_fmemopen
# define open_memstream fake_open_memstream
# define memstream_update_buffer fake_open_memstream_update_buffer
# define mremap fake_mremap
#else
# define memstream_update_buffer(A, B, C) (0)
#endif

#define STREAM_BUFSIZE (1024 * 1024 * 10)

static struct compress_thread{
	uchar *s_buf;	/* Uncompressed buffer -> Compressed buffer */
	uchar c_type;	/* Compression type */
	i64 s_len;	/* Data length uncompressed */
	i64 c_len;	/* Data length compressed */
	pthread_mutex_t mutex; /* This thread's mutex */
	struct stream_info *sinfo;
	int streamno;
	uchar salt[SALT_LEN];
} *cthread;

static struct uncomp_thread{
	uchar *s_buf;
	i64 u_len, c_len;
	i64 last_head;
	uchar c_type;
	int busy;
	int streamno;
} *ucthread;

typedef struct stream_thread_struct {
	int i;
	rzip_control *control;
} stream_thread_struct;
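
/* output_thread, together with output_lock and output_cond, appears to be
 * used by the worker threads to take turns writing their blocks out in
 * order; see the work functions further down. */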
static long output_thread;
static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t output_cond = PTHREAD_COND_INITIALIZER;
static pthread_t *threads;
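
/* Thin wrappers around the pthread primitives that bail out via fatal() on
 * failure so callers don't need to check return values everywhere. */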
static void init_mutex(pthread_mutex_t *mutex)
{
	if (unlikely(pthread_mutex_init(mutex, NULL)))
		fatal("pthread_mutex_init failed");
}

static void unlock_mutex(pthread_mutex_t *mutex)
{
	if (unlikely(pthread_mutex_unlock(mutex)))
		fatal("pthread_mutex_unlock failed");
}

static void lock_mutex(pthread_mutex_t *mutex)
{
	if (unlikely(pthread_mutex_lock(mutex)))
		fatal("pthread_mutex_lock failed");
}

static void cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	if (unlikely(pthread_cond_wait(cond, mutex)))
		fatal("pthread_cond_wait failed");
}

static void cond_broadcast(pthread_cond_t *cond)
{
	if (unlikely(pthread_cond_broadcast(cond)))
		fatal("pthread_cond_broadcast failed");
}

void create_pthread(pthread_t * thread, pthread_attr_t * attr,
	void * (*start_routine)(void *), void *arg)
{
	if (pthread_create(thread, attr, start_routine, arg))
		fatal("pthread_create");
}

void join_pthread(pthread_t th, void **thread_return)
{
	if (pthread_join(th, thread_return))
		fatal("pthread_join");
}

/* just to keep things clean, declare function here
 * but move body to the end since it's a work function
 */
static int lzo_compresses(rzip_control *control, uchar *s_buf, i64 s_len);
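
/* The fake_* helpers below stand in for glibc's fmemopen()/open_memstream()
 * on platforms lacking them (see the __APPLE__ defines above), staging the
 * data in a tmpfile(); memstream_update_buffer() copies the temporary file's
 * contents back into a freshly malloc'd buffer. */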
static inline FILE *fake_fmemopen(void *buf, size_t buflen, char *mode)
{
	FILE *in;

	if (unlikely(strcmp(mode, "r")))
		failure("fake_fmemopen only supports mode \"r\".");
	in = tmpfile();
	if (unlikely(!in))
		return NULL;
	if (unlikely(fwrite(buf, buflen, 1, in) != 1))
		return NULL;
	rewind(in);
	return in;
}

static inline FILE *fake_open_memstream(char **buf, size_t *length)
{
	FILE *out;

	if (unlikely(buf == NULL || length == NULL))
		failure("NULL parameter to fake_open_memstream");
	out = tmpfile();
	if (unlikely(!out))
		return NULL;
	return out;
}

static inline int fake_open_memstream_update_buffer(FILE *fp, uchar **buf, size_t *length)
{
	long original_pos = ftell(fp);

	if (unlikely(fseek(fp, 0, SEEK_END)))
		return -1;
	*length = ftell(fp);
	rewind(fp);
	*buf = (uchar *)malloc(*length);
	if (unlikely(!*buf))
		return -1;
	if (unlikely(fread(*buf, *length, 1, fp) != 1))
		return -1;
	if (unlikely(fseek(fp, original_pos, SEEK_SET)))
		return -1;
	return 0;
}

/*
  ***** COMPRESSION FUNCTIONS *****

  ZPAQ, BZIP, GZIP, LZMA, LZO

  try to compress a buffer. If compression fails for whatever reason then
  leave uncompressed. Return the compression type in c_type and resulting
  length in c_len
*/

static int zpaq_compress_buf(rzip_control *control, struct compress_thread *cthread, long thread)
{
	uchar *c_buf = NULL;
	size_t dlen = 0;
	FILE *in, *out;

	if (!lzo_compresses(control, cthread->s_buf, cthread->s_len))
		return 0;

	in = fmemopen(cthread->s_buf, cthread->s_len, "r");
	if (unlikely(!in)) {
		print_err("Failed to fmemopen in zpaq_compress_buf\n");
		return -1;
	}
	out = open_memstream((char **)&c_buf, &dlen);
	if (unlikely(!out)) {
		fclose(in);
		print_maxverbose("Failed to open_memstream in zpaq_compress_buf\n");
		return -1;
	}

	zpipe_compress(in, out, control->msgout, cthread->s_len,
		       (int)(SHOW_PROGRESS), thread);

	if (unlikely(memstream_update_buffer(out, &c_buf, &dlen)))
		fatal("Failed to memstream_update_buffer in zpaq_compress_buf");

	fclose(in);
	fclose(out);

	if (unlikely((i64)dlen >= cthread->c_len)) {
		print_maxverbose("Incompressible block\n");
		/* Incompressible, leave as CTYPE_NONE */
		free(c_buf);
		return 0;
	}

	cthread->c_len = dlen;
	free(cthread->s_buf);
	cthread->s_buf = c_buf;
	cthread->c_type = CTYPE_ZPAQ;
	return 0;
}

static int bzip2_compress_buf(rzip_control *control, struct compress_thread *cthread)
{
	u32 dlen = cthread->s_len;
	int bzip2_ret;
	uchar *c_buf;

	if (!lzo_compresses(control, cthread->s_buf, cthread->s_len))
		return 0;

	c_buf = malloc(dlen);
	if (!c_buf) {
		print_err("Unable to allocate c_buf in bzip2_compress_buf\n");
		return -1;
	}

	bzip2_ret = BZ2_bzBuffToBuffCompress((char *)c_buf, &dlen,
		(char *)cthread->s_buf, cthread->s_len,
		control->compression_level, 0, control->compression_level * 10);

	/* if the compressed data is bigger than the original data, leave it
	 * as CTYPE_NONE */

	if (bzip2_ret == BZ_OUTBUFF_FULL) {
		print_maxverbose("Incompressible block\n");
		/* Incompressible, leave as CTYPE_NONE */
		free(c_buf);
		return 0;
	}

	if (unlikely(bzip2_ret != BZ_OK)) {
		free(c_buf);
		print_maxverbose("BZ2 compress failed\n");
		return -1;
	}

	if (unlikely(dlen >= cthread->c_len)) {
		print_maxverbose("Incompressible block\n");
		/* Incompressible, leave as CTYPE_NONE */
		free(c_buf);
		return 0;
	}

	cthread->c_len = dlen;
	free(cthread->s_buf);
	cthread->s_buf = c_buf;
	cthread->c_type = CTYPE_BZIP2;
	return 0;
}

static int gzip_compress_buf(rzip_control *control, struct compress_thread *cthread)
{
	unsigned long dlen = cthread->s_len;
	uchar *c_buf;
	int gzip_ret;

	c_buf = malloc(dlen);
	if (!c_buf) {
		print_err("Unable to allocate c_buf in gzip_compress_buf\n");
		return -1;
	}

	gzip_ret = compress2(c_buf, &dlen, cthread->s_buf, cthread->s_len,
		control->compression_level);

	/* if the compressed data is bigger than the original data, leave it
	 * as CTYPE_NONE */

	if (gzip_ret == Z_BUF_ERROR) {
		print_maxverbose("Incompressible block\n");
		/* Incompressible, leave as CTYPE_NONE */
		free(c_buf);
		return 0;
	}

	if (unlikely(gzip_ret != Z_OK)) {
		free(c_buf);
		print_maxverbose("compress2 failed\n");
		return -1;
	}

	if (unlikely((i64)dlen >= cthread->c_len)) {
		print_maxverbose("Incompressible block\n");
		/* Incompressible, leave as CTYPE_NONE */
		free(c_buf);
		return 0;
	}

	cthread->c_len = dlen;
	free(cthread->s_buf);
	cthread->s_buf = c_buf;
	cthread->c_type = CTYPE_GZIP;
	return 0;
}

static int lzma_compress_buf(rzip_control *control, struct compress_thread *cthread)
{
	int lzma_level, lzma_ret;
	size_t prop_size = 5; /* return value for lzma_properties */
	uchar *c_buf;
	size_t dlen;

	if (!lzo_compresses(control, cthread->s_buf, cthread->s_len))
		return 0;

	/* only 7 levels with lzma, scale them */
	lzma_level = control->compression_level * 7 / 9 ? : 1;

	print_verbose("Starting lzma back end compression thread...\n");
retry:
	dlen = cthread->s_len;
	c_buf = malloc(dlen);
	if (!c_buf) {
		print_err("Unable to allocate c_buf in lzma_compress_buf\n");
		return -1;
	}

	/* with LZMA SDK 4.63, we pass compression level and threads only
	 * and receive properties in control->lzma_properties */

	lzma_ret = LzmaCompress(c_buf, &dlen, cthread->s_buf,
		(size_t)cthread->s_len, control->lzma_properties, &prop_size,
		lzma_level,
		0, /* dict size. set default, choose by level */
		-1, -1, -1, -1, /* lc, lp, pb, fb */
		control->threads);
	if (lzma_ret != SZ_OK) {
		switch (lzma_ret) {
			case SZ_ERROR_MEM:
				break;
			case SZ_ERROR_PARAM:
				print_err("LZMA Parameter ERROR: %d. This should not happen.\n", SZ_ERROR_PARAM);
				break;
			case SZ_ERROR_OUTPUT_EOF:
				print_maxverbose("Harmless LZMA Output Buffer Overflow error: %d. Incompressible block.\n", SZ_ERROR_OUTPUT_EOF);
				break;
			case SZ_ERROR_THREAD:
				print_err("LZMA Multi Thread ERROR: %d. This should not happen.\n", SZ_ERROR_THREAD);
				break;
			default:
				print_err("Unidentified LZMA ERROR: %d. This should not happen.\n", lzma_ret);
				break;
		}
		/* can pass -1 if not compressible! Thanks Lasse Collin */
		free(c_buf);
		if (lzma_ret == SZ_ERROR_MEM) {
			if (lzma_level > 1) {
				lzma_level--;
				print_verbose("LZMA Warning: %d. Can't allocate enough RAM for compression window, trying smaller.\n", SZ_ERROR_MEM);
				goto retry;
			}
			/* lzma compress can be fragile on 32 bit. If it fails,
			 * fall back to bzip2 compression so the block doesn't
			 * remain uncompressed */
			print_verbose("Unable to allocate enough RAM for any sized compression window, falling back to bzip2 compression.\n");
			return bzip2_compress_buf(control, cthread);
		} else if (lzma_ret == SZ_ERROR_OUTPUT_EOF)
			return 0;
		return -1;
	}

	if (unlikely((i64)dlen >= cthread->c_len)) {
		/* Incompressible, leave as CTYPE_NONE */
		print_maxverbose("Incompressible block\n");
		free(c_buf);
		return 0;
	}

	cthread->c_len = dlen;
	free(cthread->s_buf);
	cthread->s_buf = c_buf;
	cthread->c_type = CTYPE_LZMA;
	return 0;
}

static int lzo_compress_buf(rzip_control *control, struct compress_thread *cthread)
{
	lzo_uint in_len = cthread->s_len;
	lzo_uint dlen = in_len + in_len / 16 + 64 + 3;
	lzo_bytep wrkmem;
	uchar *c_buf;
	int ret = -1;

	wrkmem = (lzo_bytep) calloc(1, LZO1X_1_MEM_COMPRESS);
	if (unlikely(wrkmem == NULL)) {
		print_maxverbose("Failed to calloc wrkmem\n");
		return ret;
	}

	c_buf = malloc(dlen);
	if (!c_buf) {
		print_err("Unable to allocate c_buf in lzo_compress_buf\n");
		goto out_free;
	}

	/* lzo1x_1_compress does not return anything but LZO_OK so we ignore
	 * the return value */
	lzo1x_1_compress(cthread->s_buf, in_len, c_buf, &dlen, wrkmem);
	ret = 0;

	if (dlen >= in_len) {
		/* Incompressible, leave as CTYPE_NONE */
		print_maxverbose("Incompressible block\n");
		free(c_buf);
		goto out_free;
	}

	cthread->c_len = dlen;
	free(cthread->s_buf);
	cthread->s_buf = c_buf;
	cthread->c_type = CTYPE_LZO;
out_free:
	free(wrkmem);
	return ret;
}

/*
  ***** DECOMPRESSION FUNCTIONS *****

  ZPAQ, BZIP, GZIP, LZMA, LZO

  try to decompress a buffer. Return 0 on success and -1 on failure.
*/
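
/* Each decompressor below is handed the compressed block in ucthread->s_buf
 * and, on success, replaces it with a malloc'd buffer holding the u_len
 * decompressed bytes; on failure ucthread->s_buf is left pointing at the
 * original compressed data. */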
static int zpaq_decompress_buf(rzip_control *control, struct uncomp_thread *ucthread, long thread)
{
	uchar *c_buf = NULL;
	size_t dlen = 0;
	FILE *in, *out;

	in = fmemopen(ucthread->s_buf, ucthread->u_len, "r");
	if (unlikely(!in)) {
		print_err("Failed to fmemopen in zpaq_decompress_buf\n");
		return -1;
	}
	out = open_memstream((char **)&c_buf, &dlen);
	if (unlikely(!out)) {
		print_err("Failed to open_memstream in zpaq_decompress_buf\n");
		return -1;
	}

	zpipe_decompress(in, out, control->msgout, ucthread->u_len, (int)(SHOW_PROGRESS), thread);

	if (unlikely(memstream_update_buffer(out, &c_buf, &dlen)))
		fatal("Failed to memstream_update_buffer in zpaq_decompress_buf");

	fclose(in);
	fclose(out);

	if (unlikely((i64)dlen != ucthread->u_len)) {
		print_err("Inconsistent length after decompression. Got %lld bytes, expected %lld\n", (i64)dlen, ucthread->u_len);
		return -1;
	}

	free(ucthread->s_buf);
	ucthread->s_buf = c_buf;

	return 0;
}

static int bzip2_decompress_buf(rzip_control *control __UNUSED__, struct uncomp_thread *ucthread)
{
	u32 dlen = ucthread->u_len;
	int ret = 0, bzerr;
	uchar *c_buf;

	c_buf = ucthread->s_buf;
	ucthread->s_buf = malloc(dlen);
	if (unlikely(!ucthread->s_buf)) {
		print_err("Failed to allocate %d bytes for decompression\n", dlen);
		ret = -1;
		goto out;
	}

	bzerr = BZ2_bzBuffToBuffDecompress((char*)ucthread->s_buf, &dlen, (char*)c_buf, ucthread->c_len, 0, 0);
	if (unlikely(bzerr != BZ_OK)) {
		print_err("Failed to decompress buffer - bzerr=%d\n", bzerr);
		free(ucthread->s_buf);
		ucthread->s_buf = c_buf;
		ret = -1;
		goto out;
	}

	if (unlikely(dlen != ucthread->u_len)) {
		print_err("Inconsistent length after decompression. Got %d bytes, expected %lld\n", dlen, ucthread->u_len);
		ret = -1;
	}

	free(c_buf);
out:
	if (ret == -1)
		ucthread->s_buf = c_buf;
	return ret;
}

static int gzip_decompress_buf(rzip_control *control __UNUSED__, struct uncomp_thread *ucthread)
{
	unsigned long dlen = ucthread->u_len;
	int ret = 0, gzerr;
	uchar *c_buf;

	c_buf = ucthread->s_buf;
	ucthread->s_buf = malloc(dlen);
	if (unlikely(!ucthread->s_buf)) {
		print_err("Failed to allocate %ld bytes for decompression\n", dlen);
		ret = -1;
		goto out;
	}

	gzerr = uncompress(ucthread->s_buf, &dlen, c_buf, ucthread->c_len);
	if (unlikely(gzerr != Z_OK)) {
		print_err("Failed to decompress buffer - gzerr=%d\n", gzerr);
		free(ucthread->s_buf);
		ucthread->s_buf = c_buf;
		ret = -1;
		goto out;
	}

	if (unlikely((i64)dlen != ucthread->u_len)) {
		print_err("Inconsistent length after decompression. Got %ld bytes, expected %lld\n", dlen, ucthread->u_len);
		ret = -1;
	}

	free(c_buf);
out:
	if (ret == -1)
		ucthread->s_buf = c_buf;
	return ret;
}

static int lzma_decompress_buf(rzip_control *control, struct uncomp_thread *ucthread)
{
	size_t dlen = (size_t)ucthread->u_len;
	int ret = 0, lzmaerr;
	uchar *c_buf;

	c_buf = ucthread->s_buf;
	ucthread->s_buf = malloc(dlen);
	if (unlikely(!ucthread->s_buf)) {
		print_err("Failed to allocate %lld bytes for decompression\n", (i64)dlen);
		ret = -1;
		goto out;
	}

	/* With LZMA SDK 4.63 we pass control->lzma_properties
	 * which is needed for proper uncompress */
	lzmaerr = LzmaUncompress(ucthread->s_buf, &dlen, c_buf, (SizeT *)&ucthread->c_len, control->lzma_properties, 5);
	if (unlikely(lzmaerr)) {
		print_err("Failed to decompress buffer - lzmaerr=%d\n", lzmaerr);
		free(ucthread->s_buf);
		ucthread->s_buf = c_buf;
		ret = -1;
		goto out;
	}

	if (unlikely((i64)dlen != ucthread->u_len)) {
		print_err("Inconsistent length after decompression. Got %lld bytes, expected %lld\n", (i64)dlen, ucthread->u_len);
		ret = -1;
	}

	free(c_buf);
out:
	if (ret == -1)
		ucthread->s_buf = c_buf;
	return ret;
}

static int lzo_decompress_buf(rzip_control *control __UNUSED__, struct uncomp_thread *ucthread)
{
	lzo_uint dlen = ucthread->u_len;
	int ret = 0, lzerr;
	uchar *c_buf;

	c_buf = ucthread->s_buf;
	ucthread->s_buf = malloc(dlen);
	if (unlikely(!ucthread->s_buf)) {
		print_err("Failed to allocate %lu bytes for decompression\n", (unsigned long)dlen);
		ret = -1;
		goto out;
	}

	lzerr = lzo1x_decompress((uchar*)c_buf, ucthread->c_len, (uchar*)ucthread->s_buf, &dlen, NULL);
	if (unlikely(lzerr != LZO_E_OK)) {
		print_err("Failed to decompress buffer - lzerr=%d\n", lzerr);
		free(ucthread->s_buf);
		ucthread->s_buf = c_buf;
		ret = -1;
		goto out;
	}

	if (unlikely((i64)dlen != ucthread->u_len)) {
		print_err("Inconsistent length after decompression. Got %lu bytes, expected %lld\n", (unsigned long)dlen, ucthread->u_len);
		ret = -1;
	}

	free(c_buf);
out:
	if (ret == -1)
		ucthread->s_buf = c_buf;
	return ret;
}

/* WORK FUNCTIONS */
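
/* Just under 1GB; write_1g() and read_1g() below split I/O into chunks of at
 * most this size to stay clear of 32 bit overflow. */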
const i64 one_g = 1000 * 1024 * 1024;

/* Look at whether we're writing to a ram location or physical files and write
 * the data accordingly. */
ssize_t put_fdout(rzip_control *control, void *offset_buf, ssize_t ret)
{
	if (!TMP_OUTBUF)
		return write(control->fd_out, offset_buf, (size_t)ret);

	if (unlikely(control->out_ofs + ret > control->out_maxlen)) {
		/* The data won't fit in a temporary output buffer so we have
		 * to fall back to temporary files. */
		print_verbose("Unable to decompress entirely in ram, will use physical files\n");
		write_fdout(control, control->tmp_outbuf, control->out_len);
		close_tmpoutbuf(control);
		write_fdout(control, offset_buf, ret);
		return ret;
	}
	memcpy(control->tmp_outbuf + control->out_ofs, offset_buf, ret);
	control->out_ofs += ret;
	if (likely(control->out_ofs > control->out_len))
		control->out_len = control->out_ofs;
	return ret;
}

/* This is a custom version of write() which writes in 1GB chunks to avoid
   the overflows at the >= 2GB mark thanks to 32bit fuckage. This should help
   even on the rare occasion write() fails to write 1GB as well. */
ssize_t write_1g(rzip_control *control, void *buf, i64 len)
{
	uchar *offset_buf = buf;
	ssize_t ret;
	i64 total;

	total = 0;
	while (len > 0) {
		ret = MIN(len, one_g);
		ret = put_fdout(control, offset_buf, (size_t)ret);
		if (unlikely(ret <= 0))
			return ret;
		len -= ret;
		offset_buf += ret;
		total += ret;
	}
	return total;
}
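
/* Pull len more bytes from stdin into the temporary input buffer, starting at
 * the current input offset, and fail hard on a premature EOF. */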
static void read_fdin(struct rzip_control *control, i64 len)
{
	int tmpchar;
	i64 i;

	for (i = 0; i < len; i++) {
		tmpchar = getchar();
		if (unlikely(tmpchar == EOF))
			failure("Reached end of file on STDIN prematurely on read_fdin, asked for %lld got %lld\n",
				len, i);
		control->tmp_inbuf[control->in_ofs + i] = (char)tmpchar;
	}
	control->in_len = control->in_ofs + len;
}
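
/* Seek to pos on the input side: a plain lseek() on the input fd, or just an
 * offset update when the input is buffered in ram from stdin. */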
static i64 seekto_fdin(rzip_control *control, i64 pos)
{
	if (!TMP_INBUF)
		return lseek(control->fd_in, pos, SEEK_SET);
	control->in_ofs = pos;
	if (unlikely(control->in_ofs > control->in_len || control->in_ofs < 0)) {
		print_err("Tried to seek outside of in_ofs range in seekto_fdin\n");
		return -1;
	}
	return pos;
}

/* Ditto for read */
ssize_t read_1g(rzip_control *control, int fd, void *buf, i64 len)
{
	uchar *offset_buf = buf;
	ssize_t ret;
	i64 total;

	if (TMP_INBUF && fd == control->fd_in) {
		/* We're decompressing from STDIN */
		if (unlikely(control->in_ofs + len > control->in_maxlen)) {
			/* We're unable to fit it all into the temp buffer */
			write_fdin(control);
			read_tmpinfile(control, control->fd_in);
			close_tmpinbuf(control);
			goto read_fd;
		}
		if (control->in_ofs + len > control->in_len)
			read_fdin(control, control->in_ofs + len - control->in_len);
		memcpy(buf, control->tmp_inbuf + control->in_ofs, len);
		control->in_ofs += len;
		return len;
	}

	if (TMP_OUTBUF && fd == control->fd_out) {
		if (unlikely(control->out_ofs + len > control->out_maxlen))
			failure("Trying to read beyond out_ofs in tmpoutbuf\n");
		memcpy(buf, control->tmp_outbuf + control->out_ofs, len);
		control->out_ofs += len;
		return len;
	}

read_fd:
	total = 0;
	while (len > 0) {
		ret = MIN(len, one_g);
		ret = read(fd, offset_buf, (size_t)ret);
		if (unlikely(ret <= 0))
			return ret;
		len -= ret;
		offset_buf += ret;
		total += ret;
	}
	return total;
}

/* write to a file, return 0 on success and -1 on failure */
static int write_buf(rzip_control *control, int f, uchar *p, i64 len)
{
	ssize_t ret;

	ret = write_1g(control, p, (size_t)len);
	if (unlikely(ret == -1)) {
		print_err("Write of length %lld failed - %s\n", len, strerror(errno));
		return -1;
	}
	if (unlikely(ret != (ssize_t)len)) {
		print_err("Partial write!? asked for %lld bytes but got %lld\n", len, (i64)ret);
		return -1;
	}
	return 0;
}

/* write a byte */
static inline int write_u8(rzip_control *control, int f, uchar v)
{
	return write_buf(control, f, &v, 1);
}
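
/* Multi-byte values are stored little endian on disk; write only the low
 * len bytes of v. */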
static inline int write_val(rzip_control *control, int f, i64 v, int len)
{
	v = htole64(v);
	return write_buf(control, f, (uchar *)&v, len);
}

static int read_buf(rzip_control *control, int f, uchar *p, i64 len)
{
	ssize_t ret;

	ret = read_1g(control, f, p, (size_t)len);
	if (unlikely(ret == -1)) {
		print_err("Read of length %lld failed - %s\n", len, strerror(errno));
		return -1;
	}
	if (unlikely(ret != (ssize_t)len)) {
		print_err("Partial read!? asked for %lld bytes but got %lld\n", len, (i64)ret);
		return -1;
	}
	return 0;
}
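
/* Read-side counterparts of the write helpers above; read_u32 and read_i64
 * convert the value back from its little-endian on-disk form. */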
static inline int read_u8(rzip_control *control, int f, uchar *v)
{
	return read_buf(control, f, v, 1);
}

static inline int read_u32(rzip_control *control, int f, u32 *v)
{
	if (unlikely(read_buf(control, f, (uchar *)v, 4)))
		return -1;
	*v = le32toh(*v);
	return 0;
}

static inline int read_i64(rzip_control *control, int f, i64 *v)
{
	if (unlikely(read_buf(control, f, (uchar *)v, 8)))
		return -1;
	*v = le64toh(*v);
	return 0;
}

static inline int read_val(rzip_control *control, int f, i64 *v, int len)
{
	/* We may only partially fill the 8 bytes of v, so zero it first */
	*v = 0;
	return read_buf(control, f, (uchar *)v, len);
}

static int fd_seekto(rzip_control *control, struct stream_info *sinfo, i64 spos, i64 pos)
{
	if (unlikely(lseek(sinfo->fd, spos, SEEK_SET) != spos)) {
		print_err("Failed to seek to %lld in stream\n", pos);
		return -1;
	}
	return 0;
}

/* seek to a position within a set of streams - return -1 on failure */
static int seekto(rzip_control *control, struct stream_info *sinfo, i64 pos)
{
	i64 spos = pos + sinfo->initial_pos;

	if (TMP_OUTBUF) {
		spos -= control->out_relofs;
		control->out_ofs = spos;
		if (unlikely(spos > control->out_len || spos < 0)) {
			print_err("Trying to seek to %lld outside tmp outbuf in seekto\n", spos);
			return -1;
		}
		return 0;
	}

	return fd_seekto(control, sinfo, spos, pos);
}
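
/* As seekto() above but for the read side: when reading from the stdin
 * buffer, pull in more data first if the target lies beyond what has been
 * buffered so far. */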
static int read_seekto(rzip_control *control, struct stream_info *sinfo, i64 pos)
{
	i64 spos = pos + sinfo->initial_pos;

	if (TMP_INBUF) {
		if (spos > control->in_len)
			read_fdin(control, spos - control->in_len);
		control->in_ofs = spos;
		if (unlikely(spos < 0)) {
			print_err("Trying to seek to %lld outside tmp inbuf in read_seekto\n", spos);
			return -1;
		}
		return 0;
	}

	return fd_seekto(control, sinfo, spos, pos);
}
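
/* Current write position: either the real fd offset or the logical position
 * within the temporary output buffer. */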
static i64 get_seek(rzip_control *control, int fd)
{
	i64 ret;

	if (TMP_OUTBUF)
		return control->out_relofs + control->out_ofs;
	ret = lseek(fd, 0, SEEK_CUR);
	if (unlikely(ret == -1))
		fatal("Failed to lseek in get_seek\n");
	return ret;
}

static i64 get_readseek(rzip_control *control, int fd)
{
	i64 ret;

	if (TMP_INBUF)
		return control->in_ofs;
	ret = lseek(fd, 0, SEEK_CUR);
	if (unlikely(ret == -1))
		fatal("Failed to lseek in get_readseek\n");
	return ret;
}
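
/* Set up the global thread bookkeeping for compression: one pthread slot and
 * one compress_thread (with its mutex) per back end thread. */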
void prepare_streamout_threads(rzip_control *control)
{
	int i;

	/* As we serialise the generation of threads during the rzip
	 * pre-processing stage, it's faster to have one more thread available
	 * to keep all CPUs busy. There is no point splitting up the chunks
	 * into multiple threads if there will be no compression back end. */
	if (control->threads > 1)
		++control->threads;
	if (NO_COMPRESS)
		control->threads = 1;
	threads = calloc(sizeof(pthread_t), control->threads);
	if (unlikely(!threads))
		fatal("Unable to calloc threads in prepare_streamout_threads\n");

	cthread = calloc(sizeof(struct compress_thread), control->threads);
	if (unlikely(!cthread))
		fatal("Unable to calloc cthread in prepare_streamout_threads\n");

	for (i = 0; i < control->threads; i++)
		init_mutex(&cthread[i].mutex);
}

void close_streamout_threads(rzip_control *control)
{
	int i, close_thread = output_thread;

	/* Wait for the threads in the correct order in case they end up
	 * serialised */
	for (i = 0; i < control->threads; i++) {
		lock_mutex(&cthread[close_thread].mutex);
		if (++close_thread == control->threads)
			close_thread = 0;
	}
	free(cthread);
	free(threads);
}

/* open a set of output streams, compressing with the given
   compression level and algorithm */
void *open_stream_out(rzip_control *control, int f, unsigned int n, i64 chunk_limit, char cbytes)
{
	struct stream_info *sinfo;
	i64 testsize, limit;
	uchar *testmalloc;
	unsigned int i, testbufs;

	sinfo = calloc(sizeof(struct stream_info), 1);
	if (unlikely(!sinfo))
		return NULL;

	sinfo->bufsize = sinfo->size = limit = chunk_limit;

	sinfo->chunk_bytes = cbytes;
	sinfo->num_streams = n;
	sinfo->fd = f;

	sinfo->s = calloc(sizeof(struct stream), n);
	if (unlikely(!sinfo->s)) {
		free(sinfo);
		return NULL;
	}

	/* Find the largest we can make the window based on ability to malloc
	 * ram. We need 2 buffers for each compression thread and the overhead
	 * of each compression back end. No 2nd buf is required when there is
	 * no back end compression. We limit the total regardless to 1/3 ram
	 * for when the OS lies due to heavy overcommit. */
	if (NO_COMPRESS)
		testbufs = 1;
	else
		testbufs = 2;

	/* Serious limits imposed on 32 bit capabilities */
	if (BITS32)
		limit = MIN((unsigned long long)limit, (two_gig / testbufs) -
			(control->overhead * control->threads));

	testsize = (limit * testbufs) + (control->overhead * control->threads);
	if (testsize > control->maxram)
		limit = (control->maxram - (control->overhead * control->threads)) / testbufs;

	/* If we don't have enough ram for the number of threads, decrease the
	 * number of threads till we do, or only have one thread. */
	while (limit < STREAM_BUFSIZE && limit < chunk_limit) {
		if (control->threads > 1)
			--control->threads;
		else
			break;
		limit = (control->maxram - (control->overhead * control->threads)) / testbufs;
		limit = MIN(limit, chunk_limit);
	}
retest_malloc:
	testsize = (limit * testbufs) + (control->overhead * control->threads);
	testmalloc = malloc(testsize);
	if (!testmalloc) {
		limit = limit / 10 * 9;
		goto retest_malloc;
	}
	free(testmalloc);
	print_maxverbose("Succeeded in testing %lld sized malloc for back end compression\n", testsize);

	/* Make the bufsize no smaller than STREAM_BUFSIZE. Round up the
	 * bufsize to fit X threads into it */
	sinfo->bufsize = MIN(limit, MAX((limit + control->threads - 1) / control->threads,
			     STREAM_BUFSIZE));

	if (control->threads > 1)
		print_maxverbose("Using up to %d threads to compress up to %lld bytes each.\n",
			control->threads, sinfo->bufsize);
	else
		print_maxverbose("Using only 1 thread to compress up to %lld bytes\n",
			sinfo->bufsize);

	for (i = 0; i < n; i++) {
		sinfo->s[i].buf = malloc(sinfo->bufsize);
		if (unlikely(!sinfo->s[i].buf))
			fatal("Unable to malloc buffer of size %lld in open_stream_out\n", sinfo->bufsize);
	}

	return (void *)sinfo;
}

/* The block headers are all encrypted so we read the data and salt associated
 * with them, decrypt the data, then return the decrypted version of the
 * values */
static void decrypt_header(rzip_control *control, uchar *head, uchar *c_type,
			   i64 *c_len, i64 *u_len, i64 *last_head)
{
	uchar *buf = head + SALT_LEN;

	memcpy(buf, c_type, 1);
	memcpy(buf + 1, c_len, 8);
	memcpy(buf + 9, u_len, 8);
	memcpy(buf + 17, last_head, 8);

	lrz_decrypt(control, buf, 25, head);

	memcpy(c_type, buf, 1);
	memcpy(c_len, buf + 1, 8);
	memcpy(u_len, buf + 9, 8);
	memcpy(last_head, buf + 17, 8);
}

/* prepare a set of n streams for reading on file descriptor f */
void *open_stream_in(rzip_control *control, int f, int n, int chunk_bytes)
{
	struct stream_info *sinfo;
	int total_threads, i;
	uchar salt[SALT_LEN];
	i64 header_length;

	sinfo = calloc(sizeof(struct stream_info), 1);
	if (unlikely(!sinfo))
		return NULL;

	/* We have one thread dedicated to stream 0, and one more thread than
	 * CPUs to keep them busy, unless we're running single-threaded. */
	if (control->threads > 1)
		total_threads = control->threads + 2;
	else
		total_threads = control->threads + 1;
	threads = calloc(sizeof(pthread_t), total_threads);
	if (unlikely(!threads))
		return NULL;

	ucthread = calloc(sizeof(struct uncomp_thread), total_threads);
	if (unlikely(!ucthread))
		fatal("Unable to calloc ucthread in open_stream_in\n");

	sinfo->num_streams = n;
	sinfo->fd = f;
	sinfo->chunk_bytes = chunk_bytes;

	sinfo->s = calloc(sizeof(struct stream), n);
	if (unlikely(!sinfo->s)) {
		free(sinfo);
		return NULL;
	}

	sinfo->s[0].total_threads = 1;
	sinfo->s[1].total_threads = total_threads - 1;

	if (control->major_version == 0 && control->minor_version > 5) {
		/* Read in flag that tells us if there are more chunks after
		 * this. Ignored if we know the final file size */
		if (unlikely(read_u8(control, f, &control->eof))) {
			print_err("Failed to read eof flag in open_stream_in\n");
			goto failed;
		}
		/* Read in the expected chunk size */
		if (unlikely(!ENCRYPT && read_val(control, f, &sinfo->size, sinfo->chunk_bytes))) {
			print_err("Failed to read in chunk size in open_stream_in\n");
			goto failed;
		}
		print_maxverbose("Chunk size: %lld\n", sinfo->size);
		control->st_size += sinfo->size;
	}
	sinfo->initial_pos = get_readseek(control, f);

	for (i = 0; i < n; i++) {
		uchar c, enc_head[25 + SALT_LEN];
		i64 v1, v2;

		sinfo->s[i].base_thread = i;
		sinfo->s[i].uthread_no = sinfo->s[i].base_thread;
		sinfo->s[i].unext_thread = sinfo->s[i].base_thread;

		if (unlikely(ENCRYPT && read_buf(control, f, enc_head, SALT_LEN)))
			goto failed;
again:
		if (unlikely(read_u8(control, f, &c)))
			goto failed;

		/* Compatibility crap for versions < 0.40 */
		if (control->major_version == 0 && control->minor_version < 4) {
			u32 v132, v232, last_head32;

			if (unlikely(read_u32(control, f, &v132)))
				goto failed;
			if (unlikely(read_u32(control, f, &v232)))
				goto failed;
			if (unlikely(read_u32(control, f, &last_head32)))
				goto failed;

			v1 = v132;
			v2 = v232;
			sinfo->s[i].last_head = last_head32;
			header_length = 13;
		} else {
			int read_len;

			if ((control->major_version == 0 && control->minor_version < 6) ||
			    ENCRYPT)
				read_len = 8;
			else
				read_len = sinfo->chunk_bytes;
			if (unlikely(read_val(control, f, &v1, read_len)))
				goto failed;
			if (unlikely(read_val(control, f, &v2, read_len)))
				goto failed;
			if (unlikely(read_val(control, f, &sinfo->s[i].last_head, read_len)))
				goto failed;
			header_length = 1 + (read_len * 3);
		}

		if (ENCRYPT)
			decrypt_header(control, enc_head, &c, &v1, &v2, &sinfo->s[i].last_head);

		if (unlikely(c == CTYPE_NONE && v1 == 0 && v2 == 0 && sinfo->s[i].last_head == 0 && i == 0)) {
			print_err("Enabling stream close workaround\n");
			sinfo->initial_pos += header_length;
			goto again;
		}

		if (unlikely(c != CTYPE_NONE)) {
			print_err("Unexpected initial tag %d in streams\n", c);
			goto failed;
		}
		if (unlikely(v1)) {
			print_err("Unexpected initial c_len %lld in streams %lld\n", v1, v2);
			goto failed;
		}
		if (unlikely(v2)) {
			print_err("Unexpected initial u_len %lld in streams\n", v2);
			goto failed;
		}
	}

	return (void *)sinfo;

failed:
	free(sinfo->s);
	free(sinfo);
	return NULL;
}

#define MIN_SIZE (ENCRYPT ? CBC_LEN : 0)
2011-03-20 05:45:44 +01:00
|
|
|
/* Once the final data has all been written to the block header, we go back
|
|
|
|
|
* and write SALT_LEN bytes of salt before it, and encrypt the header in place
|
|
|
|
|
* by reading what has been written, encrypting it, and writing back over it.
|
|
|
|
|
* This is very convoluted depending on whether a last_head value is written
|
|
|
|
|
* to this block or not. See the callers of this function */
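/* With encryption enabled the header fields are always written 8 bytes wide,
 * so a block header is a fixed 1 + 3 * 8 = 25 bytes; that is the 25 read,
 * encrypted and written back below, preceded by SALT_LEN bytes of salt. */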
|
|
|
|
|
static void rewrite_encrypted(rzip_control *control, struct stream_info *sinfo, i64 ofs)
|
|
|
|
|
{
|
|
|
|
|
uchar *buf, *head;
|
|
|
|
|
i64 cur_ofs;
|
|
|
|
|
|
|
|
|
|
cur_ofs = get_seek(control, sinfo->fd);
|
|
|
|
|
head = malloc(25 + SALT_LEN);
|
|
|
|
|
if (unlikely(!head))
|
|
|
|
|
fatal("Failed to malloc head in rewrite_encrypted\n");
|
|
|
|
|
buf = head + SALT_LEN;
|
|
|
|
|
get_rand(head, SALT_LEN);
|
|
|
|
|
if (unlikely(seekto(control, sinfo, ofs - SALT_LEN)))
|
|
|
|
|
failure("Failed to seekto buf ofs in rewrite_encrypted\n");
|
|
|
|
|
if (unlikely(write_buf(control, sinfo->fd, head, SALT_LEN)))
|
|
|
|
|
failure("Failed to write_buf head in rewrite_encrypted\n");
|
|
|
|
|
if (unlikely(read_buf(control, sinfo->fd, buf, 25)))
|
|
|
|
|
failure("Failed to read_buf buf in rewrite_encrypted\n");
|
|
|
|
|
|
|
|
|
|
lrz_encrypt(control, buf, 25, head);
|
|
|
|
|
|
|
|
|
|
if (unlikely(seekto(control, sinfo, ofs)))
|
|
|
|
|
failure("Failed to seek back to ofs in rewrite_encrypted\n");
|
|
|
|
|
if (unlikely(write_buf(control, sinfo->fd, buf, 25)))
|
|
|
|
|
failure("Failed to write_buf encrypted buf in rewrite_encrypted\n");
|
|
|
|
|
free(head);
|
|
|
|
|
seekto(control, sinfo, cur_ofs);
|
|
|
|
|
}
|
|
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
/* Enter with s_buf allocated; s_buf points to the compressed data after the
|
|
|
|
|
* backend compression and is then freed here */
|
2011-03-08 22:32:14 +01:00
|
|
|
static void *compthread(void *data)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
2011-03-10 22:33:35 +01:00
|
|
|
stream_thread_struct *s = data;
|
|
|
|
|
rzip_control *control = s->control;
|
|
|
|
|
long i = s->i;
|
|
|
|
|
struct compress_thread *cti;
|
|
|
|
|
struct stream_info *ctis;
|
2010-12-11 03:19:34 +01:00
|
|
|
int waited = 0, ret = 0;
|
2011-03-17 01:19:34 +01:00
|
|
|
i64 padded_len;
|
2011-03-20 05:45:44 +01:00
|
|
|
int write_len;
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
/* Make sure this thread doesn't already exist */
|
|
|
|
|
|
2011-03-10 22:33:35 +01:00
|
|
|
free(data);
|
|
|
|
|
cti = &cthread[i];
|
|
|
|
|
ctis = cti->sinfo;
|
|
|
|
|
|
2011-03-08 22:32:14 +01:00
|
|
|
if (unlikely(setpriority(PRIO_PROCESS, 0, control->nice_val) == -1))
|
2010-11-24 10:12:19 +01:00
|
|
|
print_err("Warning, unable to set nice value on thread\n");
|
|
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
cti->c_type = CTYPE_NONE;
|
|
|
|
|
cti->c_len = cti->s_len;
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2011-02-07 22:27:22 +01:00
|
|
|
/* Flushing writes to disk frees up any dirty ram, improving chances
|
2010-12-18 00:02:19 +01:00
|
|
|
* of succeeding in allocating more ram */
|
|
|
|
|
fsync(ctis->fd);
|
2010-12-11 03:19:34 +01:00
|
|
|
retry:
|
2010-11-12 15:26:09 +01:00
|
|
|
if (!NO_COMPRESS && cti->c_len) {
|
2010-11-01 11:37:55 +01:00
|
|
|
if (LZMA_COMPRESS)
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = lzma_compress_buf(control, cti);
|
2010-11-01 01:18:58 +01:00
|
|
|
else if (LZO_COMPRESS)
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = lzo_compress_buf(control, cti);
|
2010-11-01 01:18:58 +01:00
|
|
|
else if (BZIP2_COMPRESS)
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = bzip2_compress_buf(control, cti);
|
2010-11-01 01:18:58 +01:00
|
|
|
else if (ZLIB_COMPRESS)
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = gzip_compress_buf(control, cti);
|
2010-11-01 01:18:58 +01:00
|
|
|
else if (ZPAQ_COMPRESS)
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = zpaq_compress_buf(control, cti, i);
|
2011-02-21 04:51:20 +01:00
|
|
|
else failure("Dunno wtf compression to use!\n");
|
2010-03-29 01:07:08 +02:00
|
|
|
}
|
|
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
padded_len = cti->c_len;
|
|
|
|
|
if (!ret && padded_len < MIN_SIZE) {
|
|
|
|
|
/* We need to pad out each block to at least be CBC_LEN bytes
|
|
|
|
|
* long or encryption cannot work. We pad it with random
|
|
|
|
|
* data */
|
|
|
|
|
padded_len = MIN_SIZE;
|
|
|
|
|
cti->s_buf = realloc(cti->s_buf, MIN_SIZE);
|
|
|
|
|
if (unlikely(!cti->s_buf))
|
|
|
|
|
fatal("Failed to realloc s_buf in compthread\n");
|
|
|
|
|
get_rand(cti->s_buf + cti->c_len, MIN_SIZE - cti->c_len);
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-11 03:19:34 +01:00
|
|
|
/* If compression fails for whatever reason when multithreaded, then wait
|
|
|
|
|
* for the previous thread to finish, serialising the work to decrease
|
|
|
|
|
* the memory requirements, increasing the chance of success */
|
2011-02-16 14:24:28 +01:00
|
|
|
if (unlikely(ret && waited))
|
2011-02-21 04:51:20 +01:00
|
|
|
failure("Failed to compress in compthread\n");
|
2011-02-16 14:24:28 +01:00
|
|
|
|
|
|
|
|
if (!waited) {
|
|
|
|
|
lock_mutex(&output_lock);
|
|
|
|
|
while (output_thread != i)
|
|
|
|
|
cond_wait(&output_cond, &output_lock);
|
|
|
|
|
unlock_mutex(&output_lock);
|
|
|
|
|
waited = 1;
|
2010-12-11 03:19:34 +01:00
|
|
|
}
|
2011-02-16 14:24:28 +01:00
|
|
|
if (unlikely(ret)) {
|
|
|
|
|
print_maxverbose("Unable to compress in parallel, waiting for previous thread to complete before trying again\n");
|
2010-12-11 03:19:34 +01:00
|
|
|
goto retry;
|
2011-02-16 14:24:28 +01:00
|
|
|
}
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
/* When encrypting, write the fields full width so the header is big enough to fill one CBC_LEN */
|
|
|
|
|
if (ENCRYPT)
|
|
|
|
|
write_len = 8;
|
|
|
|
|
else
|
|
|
|
|
write_len = ctis->chunk_bytes;
|
|
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
if (!ctis->chunks++) {
|
|
|
|
|
int j;
|
|
|
|
|
|
2011-03-12 22:34:06 +01:00
|
|
|
if (TMP_OUTBUF) {
|
2011-03-12 12:46:57 +01:00
|
|
|
if (!control->magic_written)
|
2011-03-17 06:59:27 +01:00
|
|
|
write_magic(control);
|
2011-03-14 02:48:40 +01:00
|
|
|
flush_tmpoutbuf(control);
|
2011-03-12 12:46:57 +01:00
|
|
|
}
|
|
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
/* Write chunk bytes of this block */
|
2011-03-12 04:13:28 +01:00
|
|
|
write_u8(control, ctis->fd, ctis->chunk_bytes);
|
2010-12-10 13:51:59 +01:00
|
|
|
|
2011-03-12 01:17:11 +01:00
|
|
|
/* Write whether this is the last chunk, followed by the size
|
|
|
|
|
* of this chunk */
|
2011-03-12 04:13:28 +01:00
|
|
|
write_u8(control, ctis->fd, control->eof);
|
2011-03-20 05:45:44 +01:00
|
|
|
if (!ENCRYPT)
|
|
|
|
|
write_val(control, ctis->fd, ctis->size, ctis->chunk_bytes);
|
2011-03-12 01:17:11 +01:00
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
/* First chunk of this stream, write headers */
|
2011-03-12 12:46:57 +01:00
|
|
|
ctis->initial_pos = get_seek(control, ctis->fd);
|
2010-12-10 13:51:59 +01:00
|
|
|
|
|
|
|
|
for (j = 0; j < ctis->num_streams; j++) {
|
2011-03-20 05:45:44 +01:00
|
|
|
/* If encrypting, we leave SALT_LEN room to write in salt
|
|
|
|
|
* later */
|
|
|
|
|
if (ENCRYPT) {
|
|
|
|
|
if (unlikely(write_val(control, ctis->fd, 0, SALT_LEN)))
|
|
|
|
|
fatal("Failed to write_buf blank salt in compthread %d\n", i);
|
|
|
|
|
ctis->cur_pos += SALT_LEN;
|
|
|
|
|
}
|
|
|
|
|
ctis->s[j].last_head = ctis->cur_pos + 1 + (write_len * 2);
|
2011-03-12 04:13:28 +01:00
|
|
|
write_u8(control, ctis->fd, CTYPE_NONE);
|
2011-03-20 05:45:44 +01:00
|
|
|
write_val(control, ctis->fd, 0, write_len);
|
|
|
|
|
write_val(control, ctis->fd, 0, write_len);
|
|
|
|
|
write_val(control, ctis->fd, 0, write_len);
|
|
|
|
|
ctis->cur_pos += 1 + (write_len * 3);
|
2010-12-10 13:51:59 +01:00
|
|
|
}
|
|
|
|
|
}
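/* At this point every stream in a fresh chunk has a placeholder header on
 * disk: optional blank salt, a CTYPE_NONE byte and three zeroed fields.
 * Each s[j].last_head records the offset of the third field so the first
 * real block written to that stream can backfill it with its position. */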
|
|
|
|
|
|
2011-03-12 12:46:57 +01:00
|
|
|
if (unlikely(seekto(control, ctis, ctis->s[cti->streamno].last_head)))
|
2010-11-12 15:26:09 +01:00
|
|
|
fatal("Failed to seekto in compthread %d\n", i);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
if (unlikely(write_val(control, ctis->fd, ctis->cur_pos, write_len)))
|
2011-03-18 13:18:36 +01:00
|
|
|
fatal("Failed to write_val cur_pos in compthread %d\n", i);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
if (ENCRYPT)
|
|
|
|
|
rewrite_encrypted(control, ctis, ctis->s[cti->streamno].last_head - 17);
|
|
|
|
|
|
|
|
|
|
ctis->s[cti->streamno].last_head = ctis->cur_pos + 1 + (write_len * 2);
|
2011-03-12 12:46:57 +01:00
|
|
|
if (unlikely(seekto(control, ctis, ctis->cur_pos)))
|
2010-11-12 15:26:09 +01:00
|
|
|
fatal("Failed to seekto cur_pos in compthread %d\n", i);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
print_maxverbose("Thread %ld writing %lld compressed bytes from stream %d\n", i, padded_len, cti->streamno);
|
2011-03-20 05:45:44 +01:00
|
|
|
|
|
|
|
|
if (ENCRYPT) {
|
|
|
|
|
if (unlikely(write_val(control, ctis->fd, 0, SALT_LEN)))
|
|
|
|
|
fatal("Failed to write_buf header salt in compthread %d\n", i);
|
|
|
|
|
ctis->cur_pos += SALT_LEN;
|
|
|
|
|
ctis->s[cti->streamno].last_headofs = ctis->cur_pos;
|
|
|
|
|
}
|
2011-03-17 01:19:34 +01:00
|
|
|
/* We store the actual c_len even though we might pad it out */
|
2011-03-12 04:13:28 +01:00
|
|
|
if (unlikely(write_u8(control, ctis->fd, cti->c_type) ||
|
2011-03-20 05:45:44 +01:00
|
|
|
write_val(control, ctis->fd, cti->c_len, write_len) ||
|
|
|
|
|
write_val(control, ctis->fd, cti->s_len, write_len) ||
|
|
|
|
|
write_val(control, ctis->fd, 0, write_len))) {
|
2010-11-12 15:26:09 +01:00
|
|
|
fatal("Failed write in compthread %d\n", i);
|
2010-11-10 10:56:17 +01:00
|
|
|
}
|
2011-03-20 05:45:44 +01:00
|
|
|
ctis->cur_pos += 1 + (write_len * 3);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-17 14:06:11 +01:00
|
|
|
if (ENCRYPT) {
|
2011-03-20 05:45:44 +01:00
|
|
|
get_rand(cti->salt, SALT_LEN);
|
|
|
|
|
if (unlikely(write_buf(control, ctis->fd, cti->salt, SALT_LEN)))
|
|
|
|
|
fatal("Failed to write_buf block salt in compthread %d\n", i);
|
|
|
|
|
lrz_encrypt(control, cti->s_buf, padded_len, cti->salt);
|
|
|
|
|
ctis->cur_pos += SALT_LEN;
|
2011-03-17 14:06:11 +01:00
|
|
|
}
|
2011-03-17 01:19:34 +01:00
|
|
|
if (unlikely(write_buf(control, ctis->fd, cti->s_buf, padded_len)))
|
2011-03-17 14:06:11 +01:00
|
|
|
fatal("Failed to write_buf s_buf in compthread %d\n", i);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
ctis->cur_pos += padded_len;
|
2010-11-12 15:26:09 +01:00
|
|
|
free(cti->s_buf);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-02-16 14:24:28 +01:00
|
|
|
lock_mutex(&output_lock);
|
2011-03-08 22:32:14 +01:00
|
|
|
if (++output_thread == control->threads)
|
2011-02-16 14:24:28 +01:00
|
|
|
output_thread = 0;
|
|
|
|
|
cond_broadcast(&output_cond);
|
|
|
|
|
unlock_mutex(&output_lock);
|
|
|
|
|
|
|
|
|
|
unlock_mutex(&cti->mutex);
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
return 0;
|
|
|
|
|
}
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
static void clear_buffer(rzip_control *control, struct stream_info *sinfo, int streamno, int newbuf)
|
2010-11-12 15:26:09 +01:00
|
|
|
{
|
2010-12-10 13:51:59 +01:00
|
|
|
static long i = 0;
|
2011-03-10 22:33:35 +01:00
|
|
|
stream_thread_struct *s;
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
/* Make sure this thread doesn't already exist */
|
2011-02-16 14:24:28 +01:00
|
|
|
lock_mutex(&cthread[i].mutex);
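/* Taking the slot's mutex blocks here until any previous compthread using
 * this slot has finished and released it, so the slot is safe to reuse. */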
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
cthread[i].sinfo = sinfo;
|
2011-03-10 22:35:15 +01:00
|
|
|
cthread[i].streamno = streamno;
|
|
|
|
|
cthread[i].s_buf = sinfo->s[streamno].buf;
|
|
|
|
|
cthread[i].s_len = sinfo->s[streamno].buflen;
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
print_maxverbose("Starting thread %ld to compress %lld bytes from stream %d\n",
|
2011-03-10 22:35:15 +01:00
|
|
|
i, cthread[i].s_len, streamno);
|
2011-03-10 22:33:35 +01:00
|
|
|
|
|
|
|
|
s = malloc(sizeof(stream_thread_struct));
|
|
|
|
|
if (unlikely(!s))
|
|
|
|
|
fatal("Unable to malloc in clear_buffer");
|
|
|
|
|
s->i = i;
|
|
|
|
|
s->control = control;
|
|
|
|
|
create_pthread(&threads[i], NULL, compthread, s);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
if (newbuf) {
|
2011-03-15 14:46:39 +01:00
|
|
|
/* The stream buffer has been given to the thread, allocate a
|
2011-03-16 11:24:05 +01:00
|
|
|
* new one. */
|
|
|
|
|
sinfo->s[streamno].buf = malloc(sinfo->bufsize);
|
2011-03-10 22:35:15 +01:00
|
|
|
if (unlikely(!sinfo->s[streamno].buf))
|
2010-12-10 13:51:59 +01:00
|
|
|
fatal("Unable to malloc buffer of size %lld in flush_buffer\n", sinfo->bufsize);
|
2011-03-10 22:35:15 +01:00
|
|
|
sinfo->s[streamno].buflen = 0;
|
2010-12-10 13:51:59 +01:00
|
|
|
}
|
|
|
|
|
|
2011-03-08 22:32:14 +01:00
|
|
|
if (++i == control->threads)
|
2010-12-10 13:51:59 +01:00
|
|
|
i = 0;
|
|
|
|
|
}
|
2010-11-10 10:56:17 +01:00
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
/* flush out any data in a stream buffer */
|
2011-03-10 22:35:15 +01:00
|
|
|
void flush_buffer(rzip_control *control, struct stream_info *sinfo, int streamno)
|
2010-12-10 13:51:59 +01:00
|
|
|
{
|
2011-03-10 22:35:15 +01:00
|
|
|
clear_buffer(control, sinfo, streamno, 1);
|
2010-03-29 01:07:08 +02:00
|
|
|
}
|
|
|
|
|
|
2011-03-08 22:32:14 +01:00
|
|
|
static void *ucompthread(void *data)
|
2010-11-16 11:25:32 +01:00
|
|
|
{
|
2011-03-10 22:33:35 +01:00
|
|
|
stream_thread_struct *s = data;
|
|
|
|
|
rzip_control *control = s->control;
|
|
|
|
|
long i = s->i;
|
|
|
|
|
struct uncomp_thread *uci;
|
2010-12-11 03:19:34 +01:00
|
|
|
int waited = 0, ret = 0;
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2011-03-10 22:33:35 +01:00
|
|
|
free(data);
|
|
|
|
|
uci = &ucthread[i];
|
|
|
|
|
|
2011-03-08 22:32:14 +01:00
|
|
|
if (unlikely(setpriority(PRIO_PROCESS, 0, control->nice_val) == -1))
|
2010-11-24 10:12:19 +01:00
|
|
|
print_err("Warning, unable to set nice value on thread\n");
|
|
|
|
|
|
2010-12-11 03:19:34 +01:00
|
|
|
retry:
|
2010-11-16 11:25:32 +01:00
|
|
|
if (uci->c_type != CTYPE_NONE) {
|
2010-12-11 03:19:34 +01:00
|
|
|
switch (uci->c_type) {
|
|
|
|
|
case CTYPE_LZMA:
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = lzma_decompress_buf(control, uci);
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
case CTYPE_LZO:
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = lzo_decompress_buf(control, uci);
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
case CTYPE_BZIP2:
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = bzip2_decompress_buf(control, uci);
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
case CTYPE_GZIP:
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = gzip_decompress_buf(control, uci);
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
case CTYPE_ZPAQ:
|
2011-03-08 22:32:14 +01:00
|
|
|
ret = zpaq_decompress_buf(control, uci, i);
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
default:
|
2011-02-21 04:51:20 +01:00
|
|
|
failure("Dunno wtf decompression type to use!\n");
|
2010-12-11 03:19:34 +01:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* As per compression, serialise the decompression if it fails in
|
|
|
|
|
* parallel */
|
2011-02-16 14:24:28 +01:00
|
|
|
if (unlikely(ret)) {
|
|
|
|
|
if (unlikely(waited))
|
2011-02-21 04:51:20 +01:00
|
|
|
failure("Failed to decompress in ucompthread\n");
|
2010-12-11 03:19:34 +01:00
|
|
|
print_maxverbose("Unable to decompress in parallel, waiting for previous thread to complete before trying again\n");
|
2011-02-16 14:24:28 +01:00
|
|
|
/* We do not strictly need to wait for this; it is only used when
|
|
|
|
|
* decompression fails due to inadequate memory, to try again
|
|
|
|
|
* serialised. */
|
|
|
|
|
lock_mutex(&output_lock);
|
|
|
|
|
while (output_thread != i)
|
|
|
|
|
cond_wait(&output_cond, &output_lock);
|
|
|
|
|
unlock_mutex(&output_lock);
|
2011-02-06 01:21:36 +01:00
|
|
|
waited = 1;
|
2010-12-11 03:19:34 +01:00
|
|
|
goto retry;
|
2011-02-06 01:58:47 +01:00
|
|
|
}
|
2010-12-11 03:19:34 +01:00
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
print_maxverbose("Thread %ld decompressed %lld bytes from stream %d\n", i, uci->u_len, uci->streamno);
|
2010-11-16 11:25:32 +01:00
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2011-02-09 02:38:32 +01:00
|
|
|
/* fill a buffer from a stream - return -1 on failure */
|
2011-03-10 22:35:15 +01:00
|
|
|
static int fill_buffer(rzip_control *control, struct stream_info *sinfo, int streamno)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
2011-03-20 05:45:44 +01:00
|
|
|
i64 u_len, c_len, last_head, padded_len;
|
|
|
|
|
uchar enc_head[25 + SALT_LEN], blocksalt[SALT_LEN];
|
2011-03-10 22:35:15 +01:00
|
|
|
struct stream *s = &sinfo->s[streamno];
|
2011-03-10 22:33:35 +01:00
|
|
|
stream_thread_struct *st;
|
2011-03-17 01:19:34 +01:00
|
|
|
uchar c_type, *s_buf;
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2010-12-03 09:35:48 +01:00
|
|
|
if (s->buf)
|
|
|
|
|
free(s->buf);
|
2011-02-09 02:39:15 +01:00
|
|
|
if (s->eos)
|
|
|
|
|
goto out;
|
2011-02-06 01:21:36 +01:00
|
|
|
fill_another:
|
2011-02-16 14:24:28 +01:00
|
|
|
if (unlikely(ucthread[s->uthread_no].busy))
|
2011-02-21 04:51:20 +01:00
|
|
|
failure("Trying to start a busy thread, this shouldn't happen!\n");
|
2011-02-16 14:24:28 +01:00
|
|
|
|
2011-03-14 01:25:04 +01:00
|
|
|
if (unlikely(read_seekto(control, sinfo, s->last_head)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
|
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
if (unlikely(ENCRYPT && read_buf(control, sinfo->fd, enc_head, SALT_LEN)))
|
|
|
|
|
return -1;
|
|
|
|
|
|
2011-03-14 10:22:45 +01:00
|
|
|
if (unlikely(read_u8(control, sinfo->fd, &c_type)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
2010-11-24 10:12:19 +01:00
|
|
|
|
2010-03-29 01:07:08 +02:00
|
|
|
/* Compatibility crap for versions < 0.4 */
|
2011-03-08 22:32:14 +01:00
|
|
|
if (control->major_version == 0 && control->minor_version < 4) {
|
2010-03-29 01:07:08 +02:00
|
|
|
u32 c_len32, u_len32, last_head32;
|
|
|
|
|
|
2011-03-14 10:22:45 +01:00
|
|
|
if (unlikely(read_u32(control, sinfo->fd, &c_len32)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
2011-03-14 10:22:45 +01:00
|
|
|
if (unlikely(read_u32(control, sinfo->fd, &u_len32)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
2011-03-14 10:22:45 +01:00
|
|
|
if (unlikely(read_u32(control, sinfo->fd, &last_head32)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
|
|
|
|
c_len = c_len32;
|
|
|
|
|
u_len = u_len32;
|
2010-11-16 11:25:32 +01:00
|
|
|
last_head = last_head32;
|
2010-03-29 01:07:08 +02:00
|
|
|
} else {
|
2011-03-18 13:18:36 +01:00
|
|
|
int read_len;
|
|
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
if ((control->major_version == 0 && control->minor_version < 6) || ENCRYPT)
|
2011-03-18 13:18:36 +01:00
|
|
|
read_len = 8;
|
|
|
|
|
else
|
|
|
|
|
read_len = sinfo->chunk_bytes;
|
|
|
|
|
if (unlikely(read_val(control, sinfo->fd, &c_len, read_len)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
2011-03-18 13:18:36 +01:00
|
|
|
if (unlikely(read_val(control, sinfo->fd, &u_len, read_len)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
2011-03-18 13:18:36 +01:00
|
|
|
if (unlikely(read_val(control, sinfo->fd, &last_head, read_len)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-17 14:06:11 +01:00
|
|
|
if (ENCRYPT) {
|
2011-03-20 05:45:44 +01:00
|
|
|
decrypt_header(control, enc_head, &c_type, &c_len, &u_len, &last_head);
|
|
|
|
|
if (unlikely(read_buf(control, sinfo->fd, blocksalt, SALT_LEN)))
|
2011-03-17 14:06:11 +01:00
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
padded_len = MAX(c_len, MIN_SIZE);
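/* Blocks are padded on write to at least MIN_SIZE (CBC_LEN when encrypted),
 * so read back padded_len bytes even though only c_len of them are real
 * compressed data. */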
|
2011-03-08 22:32:14 +01:00
|
|
|
fsync(control->fd_out);
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
s_buf = malloc(MAX(u_len, MIN_SIZE));
|
2011-03-16 11:48:21 +01:00
|
|
|
if (unlikely(u_len && !s_buf))
|
|
|
|
|
fatal("Unable to malloc buffer of size %lld in fill_buffer\n", u_len);
|
|
|
|
|
sinfo->ram_alloced += u_len;
|
2011-03-15 13:52:39 +01:00
|
|
|
|
2011-03-17 01:19:34 +01:00
|
|
|
if (unlikely(read_buf(control, sinfo->fd, s_buf, padded_len)))
|
2010-03-29 01:07:08 +02:00
|
|
|
return -1;
|
|
|
|
|
|
2011-03-17 04:10:58 +01:00
|
|
|
if (ENCRYPT)
|
2011-03-20 05:45:44 +01:00
|
|
|
lrz_decrypt(control, s_buf, padded_len, blocksalt);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2010-12-03 09:35:48 +01:00
|
|
|
ucthread[s->uthread_no].s_buf = s_buf;
|
|
|
|
|
ucthread[s->uthread_no].c_len = c_len;
|
|
|
|
|
ucthread[s->uthread_no].u_len = u_len;
|
|
|
|
|
ucthread[s->uthread_no].c_type = c_type;
|
2011-03-10 22:35:15 +01:00
|
|
|
ucthread[s->uthread_no].streamno = streamno;
|
2010-12-03 09:35:48 +01:00
|
|
|
s->last_head = last_head;
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2011-02-07 22:55:36 +01:00
|
|
|
/* List this thread as busy */
|
|
|
|
|
ucthread[s->uthread_no].busy = 1;
|
2010-11-16 11:25:32 +01:00
|
|
|
print_maxverbose("Starting thread %ld to decompress %lld bytes from stream %d\n",
|
2011-03-17 01:19:34 +01:00
|
|
|
s->uthread_no, padded_len, streamno);
|
2011-03-10 22:33:35 +01:00
|
|
|
|
|
|
|
|
st = malloc(sizeof(stream_thread_struct));
|
|
|
|
|
if (unlikely(!st))
|
|
|
|
|
fatal("Unable to malloc in fill_buffer");
|
|
|
|
|
st->i = s->uthread_no;
|
|
|
|
|
st->control = control;
|
|
|
|
|
create_pthread(&threads[s->uthread_no], NULL, ucompthread, st);
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2011-02-09 02:39:15 +01:00
|
|
|
if (++s->uthread_no == s->base_thread + s->total_threads)
|
2010-12-03 09:35:48 +01:00
|
|
|
s->uthread_no = s->base_thread;
|
2011-02-06 01:21:36 +01:00
|
|
|
|
2011-02-07 22:55:36 +01:00
|
|
|
/* If we have reached the end of this stream there is no more data to read in; otherwise
|
2011-02-21 14:49:50 +01:00
|
|
|
* see if the next thread is free to grab more data. We also check that
|
|
|
|
|
* we're not going to be allocating too much ram to generate all these
|
|
|
|
|
* threads. */
|
2011-02-06 01:21:36 +01:00
|
|
|
if (!last_head)
|
|
|
|
|
s->eos = 1;
|
2011-02-21 14:49:50 +01:00
|
|
|
else if (s->uthread_no != s->unext_thread && !ucthread[s->uthread_no].busy &&
|
2011-03-12 02:19:02 +01:00
|
|
|
sinfo->ram_alloced < control->maxram)
|
2011-02-21 14:49:50 +01:00
|
|
|
goto fill_another;
|
2010-11-16 11:25:32 +01:00
|
|
|
out:
|
2011-02-16 14:24:28 +01:00
|
|
|
lock_mutex(&output_lock);
|
|
|
|
|
output_thread = s->unext_thread;
|
|
|
|
|
cond_broadcast(&output_cond);
|
|
|
|
|
unlock_mutex(&output_lock);
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2011-02-16 14:24:28 +01:00
|
|
|
/* join_pthread here will make it wait till the data is ready */
|
|
|
|
|
join_pthread(threads[s->unext_thread], NULL);
|
|
|
|
|
ucthread[s->unext_thread].busy = 0;
|
|
|
|
|
|
|
|
|
|
print_maxverbose("Taking decompressed data from thread %ld\n", s->unext_thread);
|
2010-12-03 09:35:48 +01:00
|
|
|
s->buf = ucthread[s->unext_thread].s_buf;
|
|
|
|
|
s->buflen = ucthread[s->unext_thread].u_len;
|
2011-02-21 14:49:50 +01:00
|
|
|
sinfo->ram_alloced -= s->buflen;
|
2010-12-03 09:35:48 +01:00
|
|
|
s->bufp = 0;
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2011-02-09 02:39:15 +01:00
|
|
|
if (++s->unext_thread == s->base_thread + s->total_threads)
|
2010-12-03 09:35:48 +01:00
|
|
|
s->unext_thread = s->base_thread;
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2010-03-29 01:07:08 +02:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* write some data to a stream. Return -1 on failure */
|
2011-03-10 22:35:15 +01:00
|
|
|
int write_stream(rzip_control *control, void *ss, int streamno, uchar *p, i64 len)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
|
|
|
|
struct stream_info *sinfo = ss;
|
|
|
|
|
|
|
|
|
|
while (len) {
|
|
|
|
|
i64 n;
|
|
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
n = MIN(sinfo->bufsize - sinfo->s[streamno].buflen, len);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
memcpy(sinfo->s[streamno].buf + sinfo->s[streamno].buflen, p, n);
|
|
|
|
|
sinfo->s[streamno].buflen += n;
|
2010-03-29 01:07:08 +02:00
|
|
|
p += n;
|
|
|
|
|
len -= n;
|
|
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
/* Flush the buffer every sinfo->bufsize into one thread */
|
2011-03-10 22:35:15 +01:00
|
|
|
if (sinfo->s[streamno].buflen == sinfo->bufsize)
|
|
|
|
|
flush_buffer(control, sinfo, streamno);
|
2010-03-29 01:07:08 +02:00
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* read some data from a stream. Return number of bytes read, or -1
|
|
|
|
|
on failure */
|
2011-03-10 22:35:15 +01:00
|
|
|
i64 read_stream(rzip_control *control, void *ss, int streamno, uchar *p, i64 len)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
|
|
|
|
struct stream_info *sinfo = ss;
|
|
|
|
|
i64 ret = 0;
|
|
|
|
|
|
|
|
|
|
while (len) {
|
|
|
|
|
i64 n;
|
|
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
n = MIN(sinfo->s[streamno].buflen - sinfo->s[streamno].bufp, len);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
|
|
|
|
if (n > 0) {
|
2011-03-10 22:35:15 +01:00
|
|
|
memcpy(p, sinfo->s[streamno].buf + sinfo->s[streamno].bufp, n);
|
|
|
|
|
sinfo->s[streamno].bufp += n;
|
2010-03-29 01:07:08 +02:00
|
|
|
p += n;
|
|
|
|
|
len -= n;
|
|
|
|
|
ret += n;
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-10 22:35:15 +01:00
|
|
|
if (len && sinfo->s[streamno].bufp == sinfo->s[streamno].buflen) {
|
|
|
|
|
if (unlikely(fill_buffer(control, sinfo, streamno)))
|
2011-02-10 06:46:35 +01:00
|
|
|
return -1;
|
2011-03-10 22:35:15 +01:00
|
|
|
if (sinfo->s[streamno].bufp == sinfo->s[streamno].buflen)
|
2010-03-29 01:07:08 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* flush and close down a stream. return -1 on failure */
|
2011-03-08 22:32:14 +01:00
|
|
|
int close_stream_out(rzip_control *control, void *ss)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
|
|
|
|
struct stream_info *sinfo = ss;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < sinfo->num_streams; i++) {
|
2010-11-12 15:26:09 +01:00
|
|
|
if (sinfo->s[i].buflen)
|
2011-03-08 22:32:14 +01:00
|
|
|
clear_buffer(control, sinfo, i, 0);
|
2010-03-29 01:07:08 +02:00
|
|
|
}
|
2010-11-12 15:26:09 +01:00
|
|
|
|
2011-03-20 05:45:44 +01:00
|
|
|
if (ENCRYPT) {
|
|
|
|
|
/* Last two compressed blocks do not have an offset written
|
|
|
|
|
* to them so we have to go back and encrypt them now, but we
|
|
|
|
|
* must wait till the threads return. */
|
|
|
|
|
int close_thread = output_thread;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < control->threads; i++) {
|
|
|
|
|
lock_mutex(&cthread[close_thread].mutex);
|
|
|
|
|
unlock_mutex(&cthread[close_thread].mutex);
|
|
|
|
|
if (++close_thread == control->threads)
|
|
|
|
|
close_thread = 0;
|
|
|
|
|
}
|
|
|
|
|
for (i = 0; i < sinfo->num_streams; i++)
|
|
|
|
|
rewrite_encrypted(control, sinfo, sinfo->s[i].last_headofs);
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-10 13:51:59 +01:00
|
|
|
#if 0
|
|
|
|
|
/* These cannot be freed because their values are read after the next
|
|
|
|
|
* stream has started so they're not properly freed and just dropped on
|
|
|
|
|
* program exit! FIXME */
|
2010-03-29 01:07:08 +02:00
|
|
|
free(sinfo->s);
|
|
|
|
|
free(sinfo);
|
2010-12-10 13:51:59 +01:00
|
|
|
#endif
|
2010-03-29 01:07:08 +02:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* close down an input stream */
|
2011-03-14 11:51:27 +01:00
|
|
|
int close_stream_in(void *ss)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
|
|
|
|
struct stream_info *sinfo = ss;
|
|
|
|
|
int i;
|
|
|
|
|
|
2010-11-12 15:26:09 +01:00
|
|
|
for (i = 0; i < sinfo->num_streams; i++)
|
|
|
|
|
free(sinfo->s[i].buf);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
2011-02-16 14:24:28 +01:00
|
|
|
output_thread = 0;
|
2010-11-16 11:25:32 +01:00
|
|
|
free(ucthread);
|
|
|
|
|
free(threads);
|
2010-03-29 01:07:08 +02:00
|
|
|
free(sinfo->s);
|
|
|
|
|
free(sinfo);
|
2010-11-16 11:25:32 +01:00
|
|
|
|
2010-03-29 01:07:08 +02:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* As the other backends are slow and lzo is very fast, it is worth doing a quick lzo pass
|
|
|
|
|
to see if there is any compression at all with lzo first. It is unlikely
|
|
|
|
|
that the others will be able to compress if lzo is unable to drop a single byte
|
|
|
|
|
so do not compress any block that is incompressible by lzo. */
|
2011-03-08 22:32:14 +01:00
|
|
|
static int lzo_compresses(rzip_control *control, uchar *s_buf, i64 s_len)
|
2010-03-29 01:07:08 +02:00
|
|
|
{
|
2010-11-10 10:56:17 +01:00
|
|
|
lzo_bytep wrkmem = NULL;
|
|
|
|
|
lzo_uint in_len, test_len = s_len, save_len = s_len;
|
2010-03-29 01:07:08 +02:00
|
|
|
lzo_uint dlen;
|
2010-11-10 10:56:17 +01:00
|
|
|
uchar *c_buf = NULL, *test_buf = s_buf;
|
2010-03-29 01:07:08 +02:00
|
|
|
/* set minimum buffer test size based on the length of the test stream */
|
|
|
|
|
unsigned long buftest_size = (test_len > 5 * STREAM_BUFSIZE ? STREAM_BUFSIZE : STREAM_BUFSIZE / 4096);
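/* Streams larger than 5 * STREAM_BUFSIZE start testing with a full
 * STREAM_BUFSIZE sample; smaller ones start with STREAM_BUFSIZE / 4096 and
 * the sample doubles each pass until something compressible is found. */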
|
|
|
|
|
int ret = 0;
|
|
|
|
|
int workcounter = 0; /* count # of passes */
|
|
|
|
|
lzo_uint best_dlen = UINT_MAX; /* save best compression estimate */
|
|
|
|
|
|
2011-02-22 15:15:18 +01:00
|
|
|
if (!LZO_TEST)
|
2010-03-29 01:07:08 +02:00
|
|
|
return 1;
|
|
|
|
|
wrkmem = (lzo_bytep) malloc(LZO1X_1_MEM_COMPRESS);
|
2010-11-05 02:16:43 +01:00
|
|
|
if (unlikely(wrkmem == NULL))
|
2010-03-29 01:07:08 +02:00
|
|
|
fatal("Unable to allocate wrkmem in lzo_compresses\n");
|
|
|
|
|
|
|
|
|
|
in_len = MIN(test_len, buftest_size);
|
|
|
|
|
dlen = STREAM_BUFSIZE + STREAM_BUFSIZE / 16 + 64 + 3;
|
|
|
|
|
|
|
|
|
|
c_buf = malloc(dlen);
|
2010-11-05 02:16:43 +01:00
|
|
|
if (unlikely(!c_buf))
|
2010-03-29 01:07:08 +02:00
|
|
|
fatal("Unable to allocate c_buf in lzo_compresses\n");
|
|
|
|
|
|
|
|
|
|
/* Test progressively larger blocks at a time and as soon as anything
|
|
|
|
|
compressible is found, jump out as a success */
|
|
|
|
|
while (test_len > 0) {
|
|
|
|
|
workcounter++;
|
2011-03-09 03:25:33 +01:00
|
|
|
lzo1x_1_compress(test_buf, in_len, (uchar *)c_buf, &dlen, wrkmem);
|
2010-03-29 01:07:08 +02:00
|
|
|
|
|
|
|
|
if (dlen < best_dlen)
|
|
|
|
|
best_dlen = dlen; /* save best value */
|
|
|
|
|
|
2011-02-22 15:15:18 +01:00
|
|
|
if (dlen < in_len) {
|
2010-03-29 01:07:08 +02:00
|
|
|
ret = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
/* expand and move buffer */
|
|
|
|
|
test_len -= in_len;
|
|
|
|
|
if (test_len) {
|
|
|
|
|
test_buf += (ptrdiff_t)in_len;
|
|
|
|
|
if (buftest_size < STREAM_BUFSIZE)
|
|
|
|
|
buftest_size <<= 1;
|
|
|
|
|
in_len = MIN(test_len, buftest_size);
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-02-22 15:15:18 +01:00
|
|
|
print_maxverbose("lzo testing %s for chunk %ld. Compressed size = %5.2F%% of chunk, %d Passes\n",
|
|
|
|
|
(ret == 0? "FAILED" : "OK"), save_len,
|
2010-03-29 01:07:08 +02:00
|
|
|
100 * ((double) best_dlen / (double) in_len), workcounter);
|
|
|
|
|
|
|
|
|
|
free(wrkmem);
|
|
|
|
|
free(c_buf);
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|