Imported Upstream version 11.6

This commit is contained in:
Mario Fetka 2021-10-03 10:04:53 +02:00
parent 440edf0227
commit 1f7c89fb07
26 changed files with 541 additions and 287 deletions

View File

@ -1 +1 @@
11.5
11.6

11
HISTORY
View File

@ -1,6 +1,17 @@
SnapRAID HISTORY
================
11.6 2021/10
============
* The 'fix' and 'check' commands with the -e option now process the whole
files that have bad blocks, not only the blocks marked as bad.
This allows restoring the timestamp and printing the paths of the
processed files and their final state, like 'recovered' or
'unrecovered'. The previous behaviour is available with the -b,
--filter-block-error option.
* Improved the speed of the filtering in 'fix' and 'check'. This phase
happens after the "Selecting..." message. [UhClem]
11.5 2020/05
============
* Removed the default -march=native to allow deployment on any machine.
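As an example of the 11.6 change above, assuming the documented command form (the invocations below are illustrative, not taken from this commit):

    snapraid -e fix    # repairs the whole of each file that has bad blocks,
                       # restoring timestamps and reporting the final file state
    snapraid -b fix    # previous behaviour: processes only the blocks marked as bad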

View File

@ -590,9 +590,11 @@ static int repair(struct snapraid_state* state, int rehash, unsigned pos, unsign
* For each file, if we are at the last block, closes it,
* adjust the timestamp, and print the result.
*
* This works with the assumption that we always process the whole files
* to fix. This assumption is not always correct, and in such a case we
* have to skip the whole postprocessing. An example is when fixing only
* bad blocks.
* This works only if the whole file is processed, including its last block.
* This doesn't always happen, like with an explicit end block.
*
* In such a case, the check/fix command won't report any information about
* the partially checked files.
*/
static int file_post(struct snapraid_state* state, int fix, unsigned i, struct snapraid_handle* handle, unsigned diskmax)
{
@ -601,11 +603,6 @@ static int file_post(struct snapraid_state* state, int fix, unsigned i, struct s
char esc_buffer[ESC_MAX];
char esc_buffer_alt[ESC_MAX];
/* if we are processing only bad blocks, we don't have to do any post-processing */
/* as we don't have any guarantee to process the last block of the fixed files */
if (state->opt.badonly)
return 0;
/* for all the files, print the final status and do the final time fix */
/* we also ensure to close files after processing the last block */
for (j = 0; j < diskmax; ++j) {
@ -614,7 +611,6 @@ static int file_post(struct snapraid_state* state, int fix, unsigned i, struct s
struct snapraid_file* collide_file;
struct snapraid_file* file;
block_off_t file_pos;
char path[PATH_MAX];
uint64_t inode;
disk = handle[j].disk;
@ -630,7 +626,6 @@ static int file_post(struct snapraid_state* state, int fix, unsigned i, struct s
}
file = fs_par2file_get(disk, i, &file_pos);
pathprint(path, sizeof(path), "%s%s", disk->dir, file->sub);
/* if it isn't the last block in the file */
if (!file_block_is_last(file, file_pos)) {
@ -654,8 +649,10 @@ static int file_post(struct snapraid_state* state, int fix, unsigned i, struct s
/* if the file is damaged, meaning that a fix failed */
if (file_flag_has(file, FILE_IS_DAMAGED)) {
/* rename it to .unrecoverable */
char path[PATH_MAX];
char path_to[PATH_MAX];
pathprint(path, sizeof(path), "%s%s", disk->dir, file->sub);
pathprint(path_to, sizeof(path_to), "%s%s.unrecoverable", disk->dir, file->sub);
/* ensure to close the file before renaming */
@ -801,30 +798,52 @@ close_and_continue:
*/
static int block_is_enabled(struct snapraid_state* state, block_off_t i, struct snapraid_handle* handle, unsigned diskmax)
{
snapraid_info info;
unsigned j;
unsigned l;
/* get block specific info */
info = info_get(&state->infoarr, i);
/* filter for bad blocks */
if (state->opt.badblockonly) {
snapraid_info info;
/* if we filter for only bad blocks */
if (state->opt.badonly) {
/* skip if this is not bad */
if (!info_get_bad(info))
return 0;
/* get block specific info */
info = info_get(&state->infoarr, i);
/*
* Filter specifically only for bad blocks
*/
return info_get_bad(info);
}
/* now apply the filters */
/* filter for the parity */
if (state->opt.badfileonly) {
snapraid_info info;
/* if a parity is not excluded, include all blocks, even unused ones */
for (l = 0; l < state->level; ++l) {
if (!state->parity[l].is_excluded_by_filter) {
/* get block specific info */
info = info_get(&state->infoarr, i);
/*
* If the block is bad, it has to be processed
*
* This is not necessary in normal cases, because if a block is bad,
* it necessarily has a file related to it, and files with
* bad blocks are fully included.
*
* But some files may be excluded by additional filter options,
* so this is not always true, and this check ensures that all
* the bad blocks are always processed.
*/
if (info_get_bad(info))
return 1;
} else {
/* if a parity is not excluded, include all blocks, even unused ones */
for (l = 0; l < state->level; ++l) {
if (!state->parity[l].is_excluded_by_filter) {
return 1;
}
}
}
/* otherwise include only used blocks */
/* filter for the files */
for (j = 0; j < diskmax; ++j) {
struct snapraid_block* block;
@ -868,6 +887,7 @@ static int state_check_process(struct snapraid_state* state, int fix, struct sna
unsigned l;
char esc_buffer[ESC_MAX];
char esc_buffer_alt[ESC_MAX];
bit_vect_t* block_enabled;
handle = handle_mapping(state, &diskmax);
@ -889,14 +909,25 @@ static int state_check_process(struct snapraid_state* state, int fix, struct sna
unrecoverable_error = 0;
recovered_error = 0;
msg_progress("Selecting...\n");
/* first count the number of blocks to process */
countmax = 0;
block_enabled = calloc_nofail(1, bit_vect_size(blockmax)); /* preinitialize to 0 */
for (i = blockstart; i < blockmax; ++i) {
if (!block_is_enabled(state, i, handle, diskmax))
continue;
bit_vect_set(block_enabled, i);
++countmax;
}
if (fix)
msg_progress("Fixing...\n");
else if (!state->opt.auditonly)
msg_progress("Checking...\n");
else
msg_progress("Hashing...\n");
/* check all the blocks in files */
countsize = 0;
countpos = 0;
@ -908,18 +939,8 @@ static int state_check_process(struct snapraid_state* state, int fix, struct sna
snapraid_info info;
int rehash;
if (!block_is_enabled(state, i, handle, diskmax)) {
/* post process the files */
ret = file_post(state, fix, i, handle, diskmax);
if (ret == -1) {
/* LCOV_EXCL_START */
log_fatal("Stopping at block %u\n", i);
++unrecoverable_error;
goto bail;
/* LCOV_EXCL_STOP */
}
/* and now continue with the next block */
if (!bit_vect_test(block_enabled, i)) {
/* continue with the next block */
continue;
}
@ -1906,6 +1927,7 @@ bail:
free(failed);
free(failed_map);
free(block_enabled);
free(handle);
free(buffer_alloc);
free(buffer);
@ -2019,13 +2041,6 @@ int state_check(struct snapraid_state* state, int fix, block_off_t blockstart, b
parity_ptr[l] = 0;
}
if (fix)
msg_progress("Fixing...\n");
else if (!state->opt.auditonly)
msg_progress("Checking...\n");
else
msg_progress("Hashing...\n");
error = 0;
/* skip degenerated cases of empty parity, or skipping all */
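The check.c hunks above replace the old per-block callback filtering with a precomputed bit vector: the "Selecting..." phase evaluates block_is_enabled() once per block and records the result, and the main loop then only tests bits. A minimal, self-contained sketch of that two-pass pattern, with a placeholder filter predicate and the bit helpers copied from the util.h hunk later in this commit:

    #include <stdio.h>
    #include <stdlib.h>

    /* bit vector helpers, as added in util.h */
    typedef unsigned char bit_vect_t;
    #define BIT_VECT_SIZE (sizeof(bit_vect_t) * 8)
    static size_t bit_vect_size(size_t max) { return (max + BIT_VECT_SIZE - 1) / BIT_VECT_SIZE; }
    static void bit_vect_set(bit_vect_t* v, size_t off) { v[off / BIT_VECT_SIZE] |= 1 << (off % BIT_VECT_SIZE); }
    static int bit_vect_test(bit_vect_t* v, size_t off) { return (v[off / BIT_VECT_SIZE] & (1 << (off % BIT_VECT_SIZE))) != 0; }

    typedef unsigned block_off_t;

    /* placeholder for the real filtering logic */
    static int block_is_enabled(block_off_t i) { return (i % 3) != 0; }

    int main(void)
    {
        block_off_t blockstart = 0, blockmax = 1000, i;
        unsigned countmax = 0;
        bit_vect_t* block_enabled = calloc(1, bit_vect_size(blockmax)); /* zeroed */
        if (!block_enabled)
            return 1;

        /* "Selecting...": run the filters once and remember the result */
        for (i = blockstart; i < blockmax; ++i) {
            if (!block_is_enabled(i))
                continue;
            bit_vect_set(block_enabled, i);
            ++countmax;
        }

        /* processing loop: a cheap bit test per block instead of re-filtering */
        for (i = blockstart; i < blockmax; ++i) {
            if (!bit_vect_test(block_enabled, i))
                continue;
            /* ... check/fix block i ... */
        }

        printf("%u of %u blocks selected\n", countmax, blockmax);
        free(block_enabled);
        return 0;
    }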

View File

@ -28,17 +28,6 @@
/****************************************************************************/
/* dry */
/**
* Check if we have to process the specified block index ::i.
*/
static int block_is_enabled(void* void_plan, block_off_t i)
{
(void)void_plan;
(void)i;
return 1;
}
static void dry_data_reader(struct snapraid_worker* worker, struct snapraid_task* task)
{
struct snapraid_io* io = worker->io;
@ -208,7 +197,7 @@ static int state_dry_process(struct snapraid_state* state, struct snapraid_parit
countpos = 0;
/* start all the worker threads */
io_start(&io, blockstart, blockmax, &block_is_enabled, 0);
io_start(&io, blockstart, blockmax, 0);
state_progress_begin(state, blockstart, blockmax, countmax);
while (1) {

View File

@ -41,18 +41,19 @@ struct snapraid_hash* hash_alloc(struct snapraid_state* state, struct snapraid_d
struct snapraid_hash* hash;
block_off_t i;
unsigned char* buf;
size_t hash_size = BLOCK_HASH_SIZE;
hash = malloc_nofail(sizeof(struct snapraid_hash));
hash->disk = disk;
hash->file = file;
buf = malloc_nofail(file->blockmax * BLOCK_HASH_SIZE);
buf = malloc_nofail(file->blockmax * hash_size);
/* set the back pointer */
for (i = 0; i < file->blockmax; ++i) {
struct snapraid_block* block = fs_file2block_get(file, i);
memcpy(buf + i * BLOCK_HASH_SIZE, block->hash, BLOCK_HASH_SIZE);
memcpy(buf + i * hash_size, block->hash, hash_size);
if (!block_has_updated_hash(block)) {
free(buf);
@ -61,7 +62,7 @@ struct snapraid_hash* hash_alloc(struct snapraid_state* state, struct snapraid_d
}
}
memhash(state->besthash, state->hashseed, hash->hash, buf, file->blockmax * BLOCK_HASH_SIZE);
memhash(state->besthash, state->hashseed, hash->hash, buf, file->blockmax * hash_size);
free(buf);

View File

@ -736,7 +736,7 @@ int dir_name_compare(const void* void_arg, const void* void_data)
return strcmp(arg, dir->sub);
}
struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev, const char* uuid, int skip)
struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev, const char* uuid, int skip_access)
{
struct snapraid_disk* disk;
@ -748,8 +748,9 @@ struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev
/* ensure that the dir terminate with "/" if it isn't empty */
pathslash(disk->dir, sizeof(disk->dir));
#if HAVE_PTHREAD
thread_mutex_init(&disk->fs_mutex, 0);
#if HAVE_THREAD
thread_mutex_init(&disk->fs_mutex);
disk->fs_mutex_enabled = 0; /* lock will be enabled at threads start */
#endif
disk->smartctl[0] = 0;
@ -767,7 +768,7 @@ struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev
disk->has_unsupported_uuid = *uuid == 0; /* empty UUID means unsupported */
disk->had_empty_uuid = 0;
disk->mapping_idx = -1;
disk->skip_access = skip;
disk->skip_access = skip_access;
tommy_list_init(&disk->filelist);
tommy_list_init(&disk->deletedlist);
tommy_hashdyn_init(&disk->inodeset);
@ -797,17 +798,27 @@ void disk_free(struct snapraid_disk* disk)
tommy_list_foreach(&disk->dirlist, (tommy_foreach_func*)dir_free);
tommy_hashdyn_done(&disk->dirset);
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_destroy(&disk->fs_mutex);
#endif
free(disk);
}
void disk_start_thread(struct snapraid_disk* disk)
{
#if HAVE_THREAD
disk->fs_mutex_enabled = 1;
#else
(void)disk;
#endif
}
static inline void fs_lock(struct snapraid_disk* disk)
{
#if HAVE_PTHREAD
thread_mutex_lock(&disk->fs_mutex);
#if HAVE_THREAD
if (disk->fs_mutex_enabled)
thread_mutex_lock(&disk->fs_mutex);
#else
(void)disk;
#endif
@ -815,8 +826,9 @@ static inline void fs_lock(struct snapraid_disk* disk)
static inline void fs_unlock(struct snapraid_disk* disk)
{
#if HAVE_PTHREAD
thread_mutex_unlock(&disk->fs_mutex);
#if HAVE_THREAD
if (disk->fs_mutex_enabled)
thread_mutex_unlock(&disk->fs_mutex);
#else
(void)disk;
#endif

View File

@ -357,7 +357,7 @@ struct snapraid_disk {
int mapping_idx; /**< Index in the mapping vector. Used only as buffer when writing the content file. */
int skip_access; /**< If the disk is inaccessible and it should be skipped. */
#if HAVE_PTHREAD
#if HAVE_THREAD
/**
* Mutex for protecting the filesystem structure.
*
@ -367,7 +367,8 @@ struct snapraid_disk {
* Files, links and dirs are not protected as they are not expected to
* change during multithread processing.
*/
pthread_mutex_t fs_mutex;
thread_mutex_t fs_mutex;
int fs_mutex_enabled; /**< If the lock has to be used. */
#endif
/**
@ -968,13 +969,18 @@ static inline tommy_uint32_t dir_name_hash(const char* name)
/**
* Allocate a disk.
*/
struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev, const char* uuid, int skip);
struct snapraid_disk* disk_alloc(const char* name, const char* dir, uint64_t dev, const char* uuid, int skip_access);
/**
* Deallocate a disk.
*/
void disk_free(struct snapraid_disk* disk);
/**
* Enable multithread support for the disk.
*/
void disk_start_thread(struct snapraid_disk* disk);
/**
* Get the size of the disk in blocks.
*/

View File

@ -21,7 +21,7 @@
void (*io_start)(struct snapraid_io* io,
block_off_t blockstart, block_off_t blockmax,
int (*block_is_enabled)(void* arg, block_off_t), void* blockarg) = 0;
bit_vect_t* block_enabled) = 0;
void (*io_stop)(struct snapraid_io* io) = 0;
block_off_t (*io_read_next)(struct snapraid_io* io, void*** buffer) = 0;
struct snapraid_task* (*io_data_read)(struct snapraid_io* io, unsigned* diskcur, unsigned* waiting_map, unsigned* waiting_mac) = 0;
@ -40,8 +40,10 @@ static block_off_t io_position_next(struct snapraid_io* io)
block_off_t blockcur;
/* get the next position */
while (io->block_next < io->block_max && !io->block_is_enabled(io->block_arg, io->block_next))
++io->block_next;
if (io->block_enabled) {
while (io->block_next < io->block_max && !bit_vect_test(io->block_enabled, io->block_next))
++io->block_next;
}
blockcur = io->block_next;
@ -257,12 +259,11 @@ static void io_parity_write_mono(struct snapraid_io* io, unsigned* pos, unsigned
static void io_start_mono(struct snapraid_io* io,
block_off_t blockstart, block_off_t blockmax,
int (*block_is_enabled)(void* arg, block_off_t), void* blockarg)
bit_vect_t* block_enabled)
{
io->block_start = blockstart;
io->block_max = blockmax;
io->block_is_enabled = block_is_enabled;
io->block_arg = blockarg;
io->block_enabled = block_enabled;
io->block_next = blockstart;
}
@ -275,7 +276,7 @@ static void io_stop_mono(struct snapraid_io* io)
/* multi thread */
/* disable multithread if pthread is not present */
#if HAVE_PTHREAD
#if HAVE_THREAD
/**
* Get the next task to work on for a reader.
@ -767,14 +768,20 @@ static void* io_writer_thread(void* arg)
static void io_start_thread(struct snapraid_io* io,
block_off_t blockstart, block_off_t blockmax,
int (*block_is_enabled)(void* arg, block_off_t), void* blockarg)
bit_vect_t* block_enabled)
{
unsigned i;
tommy_node* j;
/* enable the filesystem mutex in all disks */
for (j = io->state->disklist; j != 0; j = j->next) {
struct snapraid_disk* disk = j->data;
disk_start_thread(disk);
}
io->block_start = blockstart;
io->block_max = blockmax;
io->block_is_enabled = block_is_enabled;
io->block_arg = blockarg;
io->block_enabled = block_enabled;
io->block_next = blockstart;
io->done = 0;
@ -804,7 +811,7 @@ static void io_start_thread(struct snapraid_io* io,
worker->index = 0;
thread_create(&worker->thread, 0, io_reader_thread, worker);
thread_create(&worker->thread, io_reader_thread, worker);
}
/* start the writer threads */
@ -813,7 +820,7 @@ static void io_start_thread(struct snapraid_io* io,
worker->index = io->io_max - 1;
thread_create(&worker->thread, 0, io_writer_thread, worker);
thread_create(&worker->thread, io_writer_thread, worker);
}
}
@ -865,11 +872,12 @@ void io_init(struct snapraid_io* io, struct snapraid_state* state,
struct snapraid_parity_handle* parity_handle_map, unsigned parity_handle_max)
{
unsigned i;
size_t allocated;
size_t allocated_size;
size_t block_size = state->block_size;
io->state = state;
#if HAVE_PTHREAD
#if HAVE_THREAD
if (io_cache == 0) {
/* default is 8 MiB of cache */
/* this seems to be a good tradeoff between speed and memory usage */
@ -891,18 +899,18 @@ void io_init(struct snapraid_io* io, struct snapraid_state* state,
assert(io->io_max == 1 || (io->io_max >= IO_MIN && io->io_max <= IO_MAX));
io->buffer_max = buffer_max;
allocated = 0;
allocated_size = 0;
for (i = 0; i < io->io_max; ++i) {
if (state->file_mode != ADVISE_DIRECT)
io->buffer_map[i] = malloc_nofail_vector_align(handle_max, buffer_max, state->block_size, &io->buffer_alloc_map[i]);
io->buffer_map[i] = malloc_nofail_vector_align(handle_max, buffer_max, block_size, &io->buffer_alloc_map[i]);
else
io->buffer_map[i] = malloc_nofail_vector_direct(handle_max, buffer_max, state->block_size, &io->buffer_alloc_map[i]);
io->buffer_map[i] = malloc_nofail_vector_direct(handle_max, buffer_max, block_size, &io->buffer_alloc_map[i]);
if (!state->opt.skip_self)
mtest_vector(io->buffer_max, state->block_size, io->buffer_map[i]);
allocated += state->block_size * buffer_max;
allocated_size += block_size * buffer_max;
}
msg_progress("Using %u MiB of memory for %u cached blocks.\n", (unsigned)(allocated / MEBI), io->io_max);
msg_progress("Using %u MiB of memory for %u cached blocks.\n", (unsigned)(allocated_size / MEBI), io->io_max);
if (parity_writer) {
io->reader_max = handle_max;
@ -960,7 +968,7 @@ void io_init(struct snapraid_io* io, struct snapraid_state* state,
worker->buffer_skew = handle_max;
}
#if HAVE_PTHREAD
#if HAVE_THREAD
if (io->io_max > 1) {
io_read_next = io_read_next_thread;
io_write_preset = io_write_preset_thread;
@ -972,11 +980,11 @@ void io_init(struct snapraid_io* io, struct snapraid_state* state,
io_start = io_start_thread;
io_stop = io_stop_thread;
thread_mutex_init(&io->io_mutex, 0);
thread_cond_init(&io->read_done, 0);
thread_cond_init(&io->read_sched, 0);
thread_cond_init(&io->write_done, 0);
thread_cond_init(&io->write_sched, 0);
thread_mutex_init(&io->io_mutex);
thread_cond_init(&io->read_done);
thread_cond_init(&io->read_sched);
thread_cond_init(&io->write_done);
thread_cond_init(&io->write_sched);
} else
#endif
{
@ -1006,7 +1014,7 @@ void io_done(struct snapraid_io* io)
free(io->writer_map);
free(io->writer_list);
#if HAVE_PTHREAD
#if HAVE_THREAD
if (io->io_max > 1) {
thread_mutex_destroy(&io->io_mutex);
thread_cond_destroy(&io->read_done);

View File

@ -87,8 +87,8 @@ struct snapraid_task {
* from a specific disk.
*/
struct snapraid_worker {
#if HAVE_PTHREAD
pthread_t thread; /**< Thread context for the worker. */
#if HAVE_THREAD
thread_id_t thread; /**< Thread context for the worker. */
#endif
struct snapraid_io* io; /**< Parent pointer. */
@ -147,12 +147,12 @@ struct snapraid_io {
*/
unsigned io_max;
#if HAVE_PTHREAD
#if HAVE_THREAD
/**
* Mutex used to protect the synchronization
* between the io and the workers.
*/
pthread_mutex_t io_mutex;
thread_mutex_t io_mutex;
/**
* Condition for a new read is completed.
@ -161,7 +161,7 @@ struct snapraid_io {
* The IO waits on this condition when it's waiting for
* a new read to be completed.
*/
pthread_cond_t read_done;
thread_cond_t read_done;
/**
* Condition for a new read scheduled.
@ -170,7 +170,7 @@ struct snapraid_io {
* read to process.
* The IO signals this condition when new reads are scheduled.
*/
pthread_cond_t read_sched;
thread_cond_t read_sched;
/**
* Condition for a new write is completed.
@ -179,7 +179,7 @@ struct snapraid_io {
* The IO waits on this condition when it's waiting for
* a new write to be completed.
*/
pthread_cond_t write_done;
thread_cond_t write_done;
/**
* Condition for a new write scheduled.
@ -188,7 +188,7 @@ struct snapraid_io {
* write to process.
* The IO signals this condition when new writes are scheduled.
*/
pthread_cond_t write_sched;
thread_cond_t write_sched;
#endif
/**
@ -217,8 +217,7 @@ struct snapraid_io {
block_off_t block_start;
block_off_t block_max;
block_off_t block_next;
int (*block_is_enabled)(void* arg, block_off_t);
void* block_arg;
bit_vect_t* block_enabled;
/**
* Buffers for data.
@ -315,7 +314,7 @@ void io_done(struct snapraid_io* io);
*/
extern void (*io_start)(struct snapraid_io* io,
block_off_t blockstart, block_off_t blockmax,
int (*block_is_enabled)(void* arg, block_off_t), void* blockarg);
bit_vect_t* block_enabled);
/**
* Stop all the worker threads.

View File

@ -56,12 +56,12 @@ static ULONGLONG (WINAPI* ptr_GetTickCount64)(void);
* Description of the last error.
* It's stored in the thread local storage.
*/
static pthread_key_t last_error;
static windows_key_t last_error;
/**
* Monotone tick counter
*/
static pthread_mutex_t tick_lock;
static windows_mutex_t tick_lock;
static uint64_t tick_last;
/**
@ -119,14 +119,14 @@ void os_init(int opt)
is_scan_winfind = opt != 0;
/* initialize the thread local storage for strerror(), using free() as destructor */
if (pthread_key_create(&last_error, free) != 0) {
log_fatal("Error calling pthread_key_create().\n");
if (windows_key_create(&last_error, free) != 0) {
log_fatal("Error calling windows_key_create().\n");
exit(EXIT_FAILURE);
}
tick_last = 0;
if (pthread_mutex_init(&tick_lock, 0) != 0) {
log_fatal("Error calling pthread_mutex_init().\n");
if (windows_mutex_init(&tick_lock, 0) != 0) {
log_fatal("Error calling windows_mutex_init().\n");
exit(EXIT_FAILURE);
}
@ -173,9 +173,9 @@ void os_init(int opt)
void os_done(void)
{
/* delete the thread local storage for strerror() */
pthread_key_delete(last_error);
windows_key_delete(last_error);
pthread_mutex_destroy(&tick_lock);
windows_mutex_destroy(&tick_lock);
/* restore the normal execution level */
SetThreadExecutionState(WIN32_ES_CONTINUOUS);
@ -1811,10 +1811,10 @@ const char* windows_strerror(int err)
snprintf(error, len, "%s [%d/%u]", str, err, (unsigned)GetLastError());
/* get previous one, if any */
previous = pthread_getspecific(last_error);
previous = windows_getspecific(last_error);
/* store in the thread local storage */
if (pthread_setspecific(last_error, error) != 0) {
if (windows_setspecific(last_error, error) != 0) {
free(error);
return str;
}
@ -2024,7 +2024,7 @@ uint64_t tick(void)
* We had reports of invalid stats due to a faulty High Precision Event Timer.
* See: https://sourceforge.net/p/snapraid/discussion/1677233/thread/a2122fd6/
*/
pthread_mutex_lock(&tick_lock);
windows_mutex_lock(&tick_lock);
/*
* MSDN 'QueryPerformanceCounter'
@ -2039,7 +2039,7 @@ uint64_t tick(void)
r = tick_last;
tick_last = r;
pthread_mutex_unlock(&tick_lock);
windows_mutex_unlock(&tick_lock);
return r;
}
@ -2600,7 +2600,7 @@ static int device_thread(tommy_list* list, void* (*func)(void* arg))
for (i = tommy_list_head(list); i != 0; i = i->next) {
devinfo_t* devinfo = i->data;
thread_create(&devinfo->thread, 0, func, devinfo);
thread_create(&devinfo->thread, func, devinfo);
}
/* joins all threads */
@ -2690,109 +2690,221 @@ int devquery(tommy_list* high, tommy_list* low, int operation, int others)
}
/****************************************************************************/
/* thread */
/* pthread like interface */
int windows_mutex_init(windows_mutex_t* mutex, void* attr)
{
CRITICAL_SECTION* cs;
(void)attr;
cs = malloc(sizeof(CRITICAL_SECTION));
if (!cs)
return -1;
InitializeCriticalSection(cs);
*mutex = cs;
InitializeCriticalSection(mutex);
return 0;
}
int windows_mutex_destroy(windows_mutex_t* mutex)
{
CRITICAL_SECTION* cs = *mutex;
DeleteCriticalSection(cs);
free(cs);
DeleteCriticalSection(mutex);
return 0;
}
int windows_mutex_lock(windows_mutex_t* mutex)
{
CRITICAL_SECTION* cs = *mutex;
EnterCriticalSection(cs);
EnterCriticalSection(mutex);
return 0;
}
int windows_mutex_unlock(windows_mutex_t* mutex)
{
CRITICAL_SECTION* cs = *mutex;
LeaveCriticalSection(cs);
LeaveCriticalSection(mutex);
return 0;
}
int windows_cond_init(windows_cond_t* cond, void* attr)
{
CONDITION_VARIABLE* cv;
(void)attr;
cv = malloc(sizeof(CONDITION_VARIABLE));
if (!cv)
return -1;
InitializeConditionVariable(cv);
*cond = cv;
InitializeConditionVariable(cond);
return 0;
}
int windows_cond_destroy(windows_cond_t* cond)
{
CONDITION_VARIABLE* cv = *cond;
/* note that in Windows there is no DeleteConditionVariable() to call */
free(cv);
(void)cond;
return 0;
}
int windows_cond_signal(windows_cond_t* cond)
{
CONDITION_VARIABLE* cv = *cond;
WakeConditionVariable(cv);
WakeConditionVariable(cond);
return 0;
}
int windows_cond_broadcast(windows_cond_t* cond)
{
CONDITION_VARIABLE* cv = *cond;
WakeAllConditionVariable(cv);
WakeAllConditionVariable(cond);
return 0;
}
int windows_cond_wait(windows_cond_t* cond, windows_mutex_t* mutex)
{
CONDITION_VARIABLE* cv = *cond;
CRITICAL_SECTION* cs = *mutex;
if (!SleepConditionVariableCS(cv, cs, INFINITE))
if (!SleepConditionVariableCS(cond, mutex, INFINITE))
return -1;
return 0;
}
struct windows_key_context {
void (* func)(void *);
DWORD key;
tommy_node node;
};
/* list of all keys with destructor */
static tommy_list windows_key_list = { 0 };
int windows_key_create(windows_key_t* key, void(* destructor)(void*))
{
struct windows_key_context* context;
context = malloc(sizeof(struct windows_key_context));
if (!context)
return -1;
context->func = destructor;
context->key = TlsAlloc();
if (context->key == 0xFFFFFFFF) {
windows_errno(GetLastError());
free(context);
return -1;
}
/* insert in the list of destructors */
if (context->func)
tommy_list_insert_tail(&windows_key_list, &context->node, context);
*key = context;
return 0;
}
int windows_key_delete(windows_key_t key)
{
struct windows_key_context* context = key;
/* remove from the list of destructors */
if (context->func)
tommy_list_remove_existing(&windows_key_list, &context->node);
TlsFree(context->key);
free(context);
return 0;
}
void* windows_getspecific(windows_key_t key)
{
struct windows_key_context* context = key;
return TlsGetValue(context->key);
}
int windows_setspecific(windows_key_t key, void* value)
{
struct windows_key_context* context = key;
if (!TlsSetValue(context->key, value)) {
windows_errno(GetLastError());
return -1;
}
return 0;
}
struct windows_thread_context {
HANDLE h;
unsigned id;
void* (* func)(void *);
void* arg;
void* ret;
};
/* forwarder to change the function declaration */
static unsigned __stdcall windows_thread_func(void* arg)
{
struct windows_thread_context* context = arg;
tommy_node* i;
context->ret = context->func(context->arg);
/* call the destructor of all the keys */
i = tommy_list_head(&windows_key_list);
while (i) {
struct windows_key_context* key = i->data;
if (key->func) {
void* value = windows_getspecific(key);
if (value)
key->func(value);
}
i = i->next;
}
return 0;
}
int windows_create(thread_id_t* thread, void* attr, void* (* func)(void *), void* arg)
{
struct windows_thread_context* context;
(void)attr;
context = malloc(sizeof(struct windows_thread_context));
if (!context)
return -1;
context->func = func;
context->arg = arg;
context->ret = 0;
context->h = (void*)_beginthreadex(0, 0, windows_thread_func, context, 0, &context->id);
if (context->h == 0) {
free(context);
return -1;
}
*thread = context;
return 0;
}
int windows_join(thread_id_t thread, void** retval)
{
struct windows_thread_context* context = thread;
if (WaitForSingleObject(context->h, INFINITE) != WAIT_OBJECT_0) {
windows_errno(GetLastError());
return -1;
}
if (!CloseHandle(context->h)) {
windows_errno(GetLastError());
return -1;
}
*retval = context->ret;
free(context);
return 0;
}
#endif
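The windows_key_* functions above emulate POSIX thread-local-storage keys on top of TlsAlloc(), adding the per-key destructor that the Windows API lacks (which is why windows_thread_func() walks the key list on thread exit). On the pthread side the same semantics are built in, as in this small sketch (the stored string is only an illustration):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_key_t last_error;

    static void* worker(void* arg)
    {
        (void)arg;
        /* each thread stores its own value; free() runs as destructor on exit */
        pthread_setspecific(last_error, strdup("worker error text"));
        printf("worker sees: %s\n", (char*)pthread_getspecific(last_error));
        return 0;
    }

    int main(void)
    {
        pthread_t t;

        /* create the key with free() as destructor, like os_init() does */
        if (pthread_key_create(&last_error, free) != 0)
            return 1;

        pthread_create(&t, 0, worker, 0);
        pthread_join(t, 0);

        /* the main thread never set a value, so it reads back NULL */
        printf("main sees: %p\n", pthread_getspecific(last_error));

        pthread_key_delete(last_error);
        return 0;
    }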

View File

@ -385,23 +385,8 @@ size_t windows_direct_size(void);
/****************************************************************************/
/* thread */
#define pthread_mutex_t windows_mutex_t
#define pthread_cond_t windows_cond_t
#define pthread_mutex_init windows_mutex_init
#define pthread_mutex_destroy windows_mutex_destroy
#define pthread_mutex_lock windows_mutex_lock
#define pthread_mutex_unlock windows_mutex_unlock
#define pthread_cond_init windows_cond_init
#define pthread_cond_destroy windows_cond_destroy
#define pthread_cond_signal windows_cond_signal
#define pthread_cond_broadcast windows_cond_broadcast
#define pthread_cond_wait windows_cond_wait
typedef void* windows_mutex_t;
typedef void* windows_cond_t;
/**
* Like pthread_* equivalent.
* Like the pthread_* equivalent.
*/
int windows_mutex_init(windows_mutex_t* mutex, void* attr);
int windows_mutex_destroy(windows_mutex_t* mutex);
@ -412,6 +397,12 @@ int windows_cond_destroy(windows_cond_t* cond);
int windows_cond_signal(windows_cond_t* cond);
int windows_cond_broadcast(windows_cond_t* cond);
int windows_cond_wait(windows_cond_t* cond, windows_mutex_t* mutex);
int windows_key_create(windows_key_t* key, void(* destructor)(void*));
int windows_key_delete(windows_key_t key);
void* windows_getspecific(windows_key_t key);
int windows_setspecific(windows_key_t key, void* value);
int windows_create(thread_id_t* thread, void* attr, void* (* func)(void *), void *arg);
int windows_join(thread_id_t thread, void** retval);
#endif
#endif

View File

@ -230,10 +230,6 @@
#include "fnmatch.h"
#endif
#if HAVE_PTHREAD_H
#include <pthread.h>
#endif
#if HAVE_MATH_H
#include <math.h>
#endif
@ -245,8 +241,37 @@
/**
* Enable thread use.
*/
#ifdef _WIN32
#define HAVE_THREAD 1
typedef void* windows_thread_t;
typedef CRITICAL_SECTION windows_mutex_t;
typedef CONDITION_VARIABLE windows_cond_t;
typedef void* windows_key_t;
/* remap to pthread */
#define thread_id_t windows_thread_t
#define thread_mutex_t windows_mutex_t
#define thread_cond_t windows_cond_t
#define pthread_mutex_init windows_mutex_init
#define pthread_mutex_destroy windows_mutex_destroy
#define pthread_mutex_lock windows_mutex_lock
#define pthread_mutex_unlock windows_mutex_unlock
#define pthread_cond_init windows_cond_init
#define pthread_cond_destroy windows_cond_destroy
#define pthread_cond_signal windows_cond_signal
#define pthread_cond_broadcast windows_cond_broadcast
#define pthread_cond_wait windows_cond_wait
#define pthread_create windows_create
#define pthread_join windows_join
#else
#if HAVE_PTHREAD_H
#include <pthread.h>
#endif
#if HAVE_PTHREAD_CREATE
#define HAVE_PTHREAD 1
#define HAVE_THREAD 1
typedef pthread_t thread_id_t;
typedef pthread_mutex_t thread_mutex_t;
typedef pthread_cond_t thread_cond_t;
#endif
#endif
/**
@ -470,8 +495,8 @@ struct devinfo_struct {
char smart_serial[SMART_MAX]; /**< SMART serial number. */
char smart_vendor[SMART_MAX]; /**< SMART vendor. */
char smart_model[SMART_MAX]; /**< SMART model. */
#if HAVE_PTHREAD
pthread_t thread;
#if HAVE_THREAD
thread_id_t thread;
#endif
tommy_node node;
};

View File

@ -50,9 +50,8 @@ struct snapraid_plan {
/**
* Check if we have to process the specified block index ::i.
*/
static int block_is_enabled(void* void_plan, block_off_t i)
static int block_is_enabled(struct snapraid_plan* plan, block_off_t i)
{
struct snapraid_plan* plan = void_plan;
time_t blocktime;
snapraid_info info;
@ -268,6 +267,7 @@ static int state_scrub_process(struct snapraid_state* state, struct snapraid_par
unsigned* waiting_map;
unsigned waiting_mac;
char esc_buffer[ESC_MAX];
bit_vect_t* block_enabled;
/* maps the disks to handles */
handle = handle_mapping(state, &diskmax);
@ -289,12 +289,16 @@ static int state_scrub_process(struct snapraid_state* state, struct snapraid_par
silent_error = 0;
io_error = 0;
msg_progress("Selecting...\n");
/* first count the number of blocks to process */
countmax = 0;
plan->countlast = 0;
block_enabled = calloc_nofail(1, bit_vect_size(blockmax)); /* preinitialize to 0 */
for (blockcur = blockstart; blockcur < blockmax; ++blockcur) {
if (!block_is_enabled(plan, blockcur))
continue;
bit_vect_set(block_enabled, blockcur);
++countmax;
}
@ -310,10 +314,11 @@ static int state_scrub_process(struct snapraid_state* state, struct snapraid_par
countsize = 0;
countpos = 0;
plan->countlast = 0;
msg_progress("Scrubbing...\n");
/* start all the worker threads */
io_start(&io, blockstart, blockmax, &block_is_enabled, plan);
io_start(&io, blockstart, blockmax, block_enabled);
state_progress_begin(state, blockstart, blockmax, countmax);
while (1) {
@ -700,6 +705,7 @@ bail:
free(rehandle_alloc);
free(waiting_map);
io_done(&io);
free(block_enabled);
if (state->opt.expect_recoverable) {
if (error + silent_error + io_error == 0)
@ -874,8 +880,6 @@ int state_scrub(struct snapraid_state* state, int plan, int olderthan)
}
}
msg_progress("Scrubbing...\n");
error = 0;
ret = state_scrub_process(state, parity_handle, 0, blockmax, &ps, now);

View File

@ -131,13 +131,20 @@ void log_open(const char* file)
char text_D[32];
time_t t;
struct tm* tm;
#if HAVE_LOCALTIME_R
struct tm tm_res;
#endif
/* leave stdlog at 0 if not specified */
if (file == 0)
return;
t = time(0);
#if HAVE_LOCALTIME_R
tm = localtime_r(&t, &tm_res);
#else
tm = localtime(&t);
#endif
if (tm) {
strftime(text_T, sizeof(text_T), "%H%M%S", tm);
strftime(text_D, sizeof(text_D), "%Y%m%d", tm);
@ -308,6 +315,7 @@ struct option long_options[] = {
{ "filter-disk", 1, 0, 'd' },
{ "filter-missing", 0, 0, 'm' },
{ "filter-error", 0, 0, 'e' },
{ "filter-block-error", 0, 0, 'b' },
{ "percentage", 1, 0, 'p' }, /* legacy name for --plan */
{ "plan", 1, 0, 'p' },
{ "older-than", 1, 0, 'o' },
@ -471,7 +479,7 @@ struct option long_options[] = {
};
#endif
#define OPTIONS "c:f:d:mep:o:S:B:L:i:l:ZEUDNFRahTC:vqHVG"
#define OPTIONS "c:f:d:mebp:o:S:B:L:i:l:ZEUDNFRahTC:vqHVG"
volatile int global_interrupt = 0;
@ -552,6 +560,9 @@ int main(int argc, char* argv[])
int period;
time_t t;
struct tm* tm;
#if HAVE_LOCALTIME_R
struct tm tm_res;
#endif
int i;
test(argc, argv);
@ -617,10 +628,18 @@ int main(int argc, char* argv[])
opt.expected_missing = 1;
break;
case 'e' :
/* when processing only error, we filter both files and blocks */
/* when processing only errors, we filter files */
/* and we apply fixes only to synced ones */
filter_error = 1;
opt.badonly = 1;
opt.badfileonly = 1;
opt.syncedonly = 1;
break;
case 'b' :
/* when processing only blocks with errors, we filter both files and blocks */
/* and we apply fixes only to synced ones */
filter_error = 1;
opt.badfileonly = 1;
opt.badblockonly = 1;
opt.syncedonly = 1;
break;
case 'p' :
@ -862,7 +881,7 @@ int main(int argc, char* argv[])
opt.force_stats = 1;
break;
case OPT_TEST_COND_SIGNAL_OUTSIDE :
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_cond_signal_outside = 1;
#endif
break;
@ -1225,7 +1244,11 @@ int main(int argc, char* argv[])
/* print generic info into the log */
t = time(0);
#if HAVE_LOCALTIME_R
tm = localtime_r(&t, &tm_res);
#else
tm = localtime(&t);
#endif
log_tag("version:%s\n", PACKAGE_VERSION);
log_tag("unixtime:%" PRIi64 "\n", (int64_t)t);
if (tm) {
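The localtime_r changes above make the timestamp formatting thread safe: the result is written into a caller-provided struct tm instead of the static buffer returned by plain localtime(). A minimal sketch of the same HAVE_LOCALTIME_R guard (here the macro is hard-coded instead of being detected by configure):

    #include <stdio.h>
    #include <time.h>

    /* normally detected by configure; assumed here for the sketch */
    #define HAVE_LOCALTIME_R 1

    int main(void)
    {
        char text_T[32];
        time_t t = time(0);
        struct tm* tm;
    #if HAVE_LOCALTIME_R
        struct tm tm_res;
        /* thread safe: the result goes into our own tm_res */
        tm = localtime_r(&t, &tm_res);
    #else
        /* fallback: localtime() returns a shared static buffer */
        tm = localtime(&t);
    #endif
        if (tm) {
            strftime(text_T, sizeof(text_T), "%Y%m%d %H%M%S", tm);
            printf("%s\n", text_T);
        }
        return 0;
    }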

View File

@ -44,7 +44,7 @@
* Multi thread for verify is instead generally faster,
* so we enable it if possible.
*/
#if HAVE_PTHREAD
#if HAVE_THREAD
/* #define HAVE_MT_WRITE 1 */
#define HAVE_MT_VERIFY 1
#endif
@ -2881,7 +2881,7 @@ static void state_read_content(struct snapraid_state* state, const char* path, S
struct state_write_thread_context {
struct snapraid_state* state;
#if HAVE_MT_WRITE
pthread_t thread;
thread_id_t thread;
#endif
/* input */
block_off_t blockmax;
@ -3498,7 +3498,7 @@ static void state_write_content(struct snapraid_state* state, uint32_t* out_crc)
context->info_has_rehash = info_has_rehash;
context->f = f;
thread_create(&context->thread, 0, state_write_thread, context);
thread_create(&context->thread, state_write_thread, context);
i = i->next;
}
@ -3843,7 +3843,7 @@ struct state_verify_thread_context {
struct snapraid_state* state;
struct snapraid_content* content;
#if HAVE_MT_VERIFY
pthread_t thread;
thread_id_t thread;
#else
void* retval;
#endif
@ -3934,7 +3934,7 @@ static void state_verify_content(struct snapraid_state* state, uint32_t crc)
context->f = f;
#if HAVE_MT_VERIFY
thread_create(&context->thread, 0, state_verify_thread, context);
thread_create(&context->thread, state_verify_thread, context);
#else
context->retval = state_verify_thread(context);
#endif
@ -4117,7 +4117,7 @@ void state_filter(struct snapraid_state* state, tommy_list* filterlist_file, tom
if (!filter_missing && !filter_error && tommy_list_empty(filterlist_file) && tommy_list_empty(filterlist_disk))
return;
msg_progress("Filtering...\n");
msg_progress("Selecting...\n");
for (i = tommy_list_head(filterlist_disk); i != 0; i = i->next) {
struct snapraid_filter* filter = i->data;
@ -4564,7 +4564,7 @@ int state_progress(struct snapraid_state* state, struct snapraid_io* io, block_o
out_size_speed = (unsigned)(delta_size / MEGA / delta_time);
/* estimate the speed in block/s */
if (delta_pos != 0)
if (delta_time != 0)
out_block_speed = (unsigned)(delta_pos / delta_time);
/* estimate the cpu usage percentage */
@ -4591,7 +4591,7 @@ int state_progress(struct snapraid_state* state, struct snapraid_io* io, block_o
msg_bar("%u%%, %u MB", out_perc, (unsigned)(countsize / MEGA));
if (out_computed) {
msg_bar(", %u MB/s", out_size_speed);
msg_bar(", %u block/s", out_block_speed);
msg_bar(", %u stripe/s", out_block_speed);
msg_bar(", CPU %u%%", out_cpu);
msg_bar(", %u:%02u ETA", out_eta / 60, out_eta % 60);
}

View File

@ -73,7 +73,8 @@ extern volatile int global_interrupt;
struct snapraid_option {
int gui; /**< Gui output. */
int auditonly; /**< In check, checks only the hash and not the parity. */
int badonly; /**< In fix, fixes only the blocks marked as bad. */
int badfileonly; /**< In fix, fixes only files marked as bad. */
int badblockonly; /**< In fix, fixes only the blocks marked as bad. */
int syncedonly; /**< In fix, fixes only files that are synced. */
int prehash; /**< Enables the prehash mode for sync. */
unsigned io_error_limit; /**< Max number of input/output errors before aborting. */

View File

@ -501,15 +501,34 @@ int sgetu32(STREAM* f, uint32_t* value)
int c;
c = sgetc(f);
if (c >= '0' && c <= '9') {
if (c == '0') {
*value = 0;
return 0;
} else if (c >= '1' && c <= '9') {
uint32_t v;
v = c - '0';
c = sgetc(f);
while (c >= '0' && c <= '9') {
uint32_t digit;
if (v > 0xFFFFFFFFU / 10) {
/* LCOV_EXCL_START */
/* overflow */
return -1;
/* LCOV_EXCL_STOP */
}
v *= 10;
v += c - '0';
digit = c - '0';
if (v > 0xFFFFFFFFU - digit) {
/* LCOV_EXCL_START */
/* overflow */
return -1;
/* LCOV_EXCL_STOP */
}
v += digit;
c = sgetc(f);
}
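The sgetu32() change above rejects overflow at both steps of the accumulation: before multiplying by 10 and before adding the next digit. The same checks in a standalone form (the function name and error convention are made up for this sketch; the real code reads characters from a STREAM):

    #include <stdint.h>
    #include <stdio.h>

    /* parse a decimal string into a uint32_t, returning -1 on overflow
     * or invalid input; same overflow checks as sgetu32() */
    static int parse_u32(const char* s, uint32_t* value)
    {
        uint32_t v = 0;
        if (*s < '0' || *s > '9')
            return -1;
        for (; *s >= '0' && *s <= '9'; ++s) {
            uint32_t digit = (uint32_t)(*s - '0');
            /* would v * 10 exceed 0xFFFFFFFF ? */
            if (v > 0xFFFFFFFFU / 10)
                return -1;
            v *= 10;
            /* would v + digit wrap around ? */
            if (v > 0xFFFFFFFFU - digit)
                return -1;
            v += digit;
        }
        *value = v;
        return 0;
    }

    int main(void)
    {
        uint32_t v = 0;
        int r = parse_u32("4294967295", &v);
        printf("4294967295 -> %d (v=%u)\n", r, v);   /* accepted: fits in 32 bits */
        printf("4294967296 -> %d\n", parse_u32("4294967296", &v)); /* rejected: overflow */
        return 0;
    }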

View File

@ -25,51 +25,51 @@
/**
* Locks used externally.
*/
#if HAVE_PTHREAD
static pthread_mutex_t msg_lock;
static pthread_mutex_t memory_lock;
#if HAVE_THREAD
static thread_mutex_t msg_lock;
static thread_mutex_t memory_lock;
#endif
void lock_msg(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_lock(&msg_lock);
#endif
}
void unlock_msg(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_unlock(&msg_lock);
#endif
}
void lock_memory(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_lock(&memory_lock);
#endif
}
void unlock_memory(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_unlock(&memory_lock);
#endif
}
void lock_init(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
/* initialize the locks as first operation as log_fatal depends on them */
thread_mutex_init(&msg_lock, 0);
thread_mutex_init(&memory_lock, 0);
thread_mutex_init(&msg_lock);
thread_mutex_init(&memory_lock);
#endif
}
void lock_done(void)
{
#if HAVE_PTHREAD
#if HAVE_THREAD
thread_mutex_destroy(&msg_lock);
thread_mutex_destroy(&memory_lock);
#endif
@ -1539,10 +1539,10 @@ int smartctl_flush(FILE* f, const char* file, const char* name)
/****************************************************************************/
/* thread */
#if HAVE_PTHREAD
void thread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t* attr)
#if HAVE_THREAD
void thread_mutex_init(thread_mutex_t* mutex)
{
if (pthread_mutex_init(mutex, attr) != 0) {
if (pthread_mutex_init(mutex, 0) != 0) {
/* LCOV_EXCL_START */
log_fatal("Failed call to pthread_mutex_init().\n");
os_abort();
@ -1550,7 +1550,7 @@ void thread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t* attr)
}
}
void thread_mutex_destroy(pthread_mutex_t* mutex)
void thread_mutex_destroy(thread_mutex_t* mutex)
{
if (pthread_mutex_destroy(mutex) != 0) {
/* LCOV_EXCL_START */
@ -1560,7 +1560,7 @@ void thread_mutex_destroy(pthread_mutex_t* mutex)
}
}
void thread_mutex_lock(pthread_mutex_t* mutex)
void thread_mutex_lock(thread_mutex_t* mutex)
{
if (pthread_mutex_lock(mutex) != 0) {
/* LCOV_EXCL_START */
@ -1570,7 +1570,7 @@ void thread_mutex_lock(pthread_mutex_t* mutex)
}
}
void thread_mutex_unlock(pthread_mutex_t* mutex)
void thread_mutex_unlock(thread_mutex_t* mutex)
{
if (pthread_mutex_unlock(mutex) != 0) {
/* LCOV_EXCL_START */
@ -1580,9 +1580,9 @@ void thread_mutex_unlock(pthread_mutex_t* mutex)
}
}
void thread_cond_init(pthread_cond_t* cond, pthread_condattr_t* attr)
void thread_cond_init(thread_cond_t* cond)
{
if (pthread_cond_init(cond, attr) != 0) {
if (pthread_cond_init(cond, 0) != 0) {
/* LCOV_EXCL_START */
log_fatal("Failed call to pthread_cond_init().\n");
os_abort();
@ -1590,7 +1590,7 @@ void thread_cond_init(pthread_cond_t* cond, pthread_condattr_t* attr)
}
}
void thread_cond_destroy(pthread_cond_t* cond)
void thread_cond_destroy(thread_cond_t* cond)
{
if (pthread_cond_destroy(cond) != 0) {
/* LCOV_EXCL_START */
@ -1600,7 +1600,7 @@ void thread_cond_destroy(pthread_cond_t* cond)
}
}
void thread_cond_signal(pthread_cond_t* cond)
void thread_cond_signal(thread_cond_t* cond)
{
if (pthread_cond_signal(cond) != 0) {
/* LCOV_EXCL_START */
@ -1610,7 +1610,7 @@ void thread_cond_signal(pthread_cond_t* cond)
}
}
void thread_cond_broadcast(pthread_cond_t* cond)
void thread_cond_broadcast(thread_cond_t* cond)
{
if (pthread_cond_broadcast(cond) != 0) {
/* LCOV_EXCL_START */
@ -1620,7 +1620,7 @@ void thread_cond_broadcast(pthread_cond_t* cond)
}
}
void thread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
void thread_cond_wait(thread_cond_t* cond, thread_mutex_t* mutex)
{
if (pthread_cond_wait(cond, mutex) != 0) {
/* LCOV_EXCL_START */
@ -1659,7 +1659,7 @@ void thread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
*/
int thread_cond_signal_outside = 0;
void thread_cond_signal_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mutex)
void thread_cond_signal_and_unlock(thread_cond_t* cond, thread_mutex_t* mutex)
{
if (thread_cond_signal_outside) {
/* without the thread checker unlock before signaling, */
@ -1676,7 +1676,7 @@ void thread_cond_signal_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mutex)
}
}
void thread_cond_broadcast_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mutex)
void thread_cond_broadcast_and_unlock(thread_cond_t* cond, thread_mutex_t* mutex)
{
if (thread_cond_signal_outside) {
/* without the thread checker unlock before signaling, */
@ -1693,9 +1693,9 @@ void thread_cond_broadcast_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mut
}
}
void thread_create(pthread_t* thread, pthread_attr_t* attr, void *(* func)(void *), void *arg)
void thread_create(thread_id_t* thread, void* (* func)(void *), void *arg)
{
if (pthread_create(thread, attr, func, arg) != 0) {
if (pthread_create(thread, 0, func, arg) != 0) {
/* LCOV_EXCL_START */
log_fatal("Failed call to pthread_create().\n");
os_abort();
@ -1703,7 +1703,7 @@ void thread_create(pthread_t* thread, pthread_attr_t* attr, void *(* func)(void
}
}
void thread_join(pthread_t thread, void** retval)
void thread_join(thread_id_t thread, void** retval)
{
if (pthread_join(thread, retval) != 0) {
/* LCOV_EXCL_START */
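The thread_* wrappers above now hide both the pthread attribute arguments and the error handling: every wrapper aborts via log_fatal()/os_abort() on failure, so callers never check return codes. A stripped-down sketch of the pattern over plain pthread (stderr and abort() stand in for the real logging helpers):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* abort-on-failure wrappers in the style of support.c */
    static void thread_mutex_init(pthread_mutex_t* mutex)
    {
        if (pthread_mutex_init(mutex, 0) != 0) {
            fprintf(stderr, "Failed call to pthread_mutex_init().\n");
            abort();
        }
    }

    static void thread_mutex_lock(pthread_mutex_t* mutex)
    {
        if (pthread_mutex_lock(mutex) != 0) {
            fprintf(stderr, "Failed call to pthread_mutex_lock().\n");
            abort();
        }
    }

    static void thread_mutex_unlock(pthread_mutex_t* mutex)
    {
        if (pthread_mutex_unlock(mutex) != 0) {
            fprintf(stderr, "Failed call to pthread_mutex_unlock().\n");
            abort();
        }
    }

    static pthread_mutex_t msg_lock;

    int main(void)
    {
        thread_mutex_init(&msg_lock);

        /* callers stay free of error handling */
        thread_mutex_lock(&msg_lock);
        printf("protected message\n");
        thread_mutex_unlock(&msg_lock);

        pthread_mutex_destroy(&msg_lock);
        return 0;
    }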

View File

@ -407,7 +407,7 @@ int smartctl_flush(FILE* f, const char* file, const char* name);
/****************************************************************************/
/* thread */
#if HAVE_PTHREAD
#if HAVE_THREAD
/**
* Control when to signal the condition variables.
*
@ -420,19 +420,19 @@ extern int thread_cond_signal_outside;
/**
* Thread wrappers to handle error conditions.
*/
void thread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t* attr);
void thread_mutex_destroy(pthread_mutex_t* mutex);
void thread_mutex_lock(pthread_mutex_t* mutex);
void thread_mutex_unlock(pthread_mutex_t* mutex);
void thread_cond_init(pthread_cond_t* cond, pthread_condattr_t* attr);
void thread_cond_destroy(pthread_cond_t* cond);
void thread_cond_signal(pthread_cond_t* cond);
void thread_cond_broadcast(pthread_cond_t* cond);
void thread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex);
void thread_cond_signal_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mutex);
void thread_cond_broadcast_and_unlock(pthread_cond_t* cond, pthread_mutex_t* mutex);
void thread_create(pthread_t* thread, pthread_attr_t* attr, void *(* func)(void *), void *arg);
void thread_join(pthread_t thread, void** retval);
void thread_mutex_init(thread_mutex_t* mutex);
void thread_mutex_destroy(thread_mutex_t* mutex);
void thread_mutex_lock(thread_mutex_t* mutex);
void thread_mutex_unlock(thread_mutex_t* mutex);
void thread_cond_init(thread_cond_t* cond);
void thread_cond_destroy(thread_cond_t* cond);
void thread_cond_signal(thread_cond_t* cond);
void thread_cond_broadcast(thread_cond_t* cond);
void thread_cond_wait(thread_cond_t* cond, thread_mutex_t* mutex);
void thread_cond_signal_and_unlock(thread_cond_t* cond, thread_mutex_t* mutex);
void thread_cond_broadcast_and_unlock(thread_cond_t* cond, thread_mutex_t* mutex);
void thread_create(thread_id_t* thread, void* (* func)(void *), void *arg);
void thread_join(thread_id_t thread, void** retval);
#endif
#endif

View File

@ -440,9 +440,8 @@ struct snapraid_rehash {
/**
* Check if we have to process the specified block index ::i.
*/
static int block_is_enabled(void* void_plan, block_off_t i)
static int block_is_enabled(struct snapraid_plan* plan, block_off_t i)
{
struct snapraid_plan* plan = void_plan;
unsigned j;
int one_invalid;
int one_valid;
@ -693,6 +692,7 @@ static int state_sync_process(struct snapraid_state* state, struct snapraid_pari
unsigned* waiting_map;
unsigned waiting_mac;
char esc_buffer[ESC_MAX];
bit_vect_t* block_enabled;
/* the sync process assumes that all the hashes are correct */
/* including the ones from CHG and DELETED blocks */
@ -732,14 +732,18 @@ static int state_sync_process(struct snapraid_state* state, struct snapraid_pari
silent_error = 0;
io_error = 0;
msg_progress("Selecting...\n");
/* first count the number of blocks to process */
countmax = 0;
plan.handle_max = diskmax;
plan.handle_map = handle;
plan.force_full = state->opt.force_full;
block_enabled = calloc_nofail(1, bit_vect_size(blockmax)); /* preinitialize to 0 */
for (blockcur = blockstart; blockcur < blockmax; ++blockcur) {
if (!block_is_enabled(&plan, blockcur))
continue;
bit_vect_set(block_enabled, blockcur);
++countmax;
}
@ -756,8 +760,10 @@ static int state_sync_process(struct snapraid_state* state, struct snapraid_pari
countsize = 0;
countpos = 0;
msg_progress("Syncing...\n");
/* start all the worker threads */
io_start(&io, blockstart, blockmax, &block_is_enabled, &plan);
io_start(&io, blockstart, blockmax, block_enabled);
if (!state_progress_begin(state, blockstart, blockmax, countmax))
goto end;
@ -1413,6 +1419,7 @@ bail:
free(failed_map);
free(waiting_map);
io_done(&io);
free(block_enabled);
if (state->opt.expect_recoverable) {
if (error + silent_error + io_error == 0)
@ -1575,8 +1582,6 @@ int state_sync(struct snapraid_state* state, block_off_t blockstart, block_off_t
log_fatal("WARNING! Skipped state write for --test-skip-content-write option.\n");
}
msg_progress("Syncing...\n");
/* skip degenerated cases of empty parity, or skipping all */
if (blockstart < blockmax) {
ret = state_sync_process(state, parity_handle, blockstart, blockmax);

View File

@ -1325,12 +1325,12 @@ static int device_thread(tommy_list* list, void* (*func)(void* arg))
int fail = 0;
tommy_node* i;
#if HAVE_PTHREAD
#if HAVE_THREAD
/* start all threads */
for (i = tommy_list_head(list); i != 0; i = i->next) {
devinfo_t* devinfo = i->data;
thread_create(&devinfo->thread, 0, func, devinfo);
thread_create(&devinfo->thread, func, devinfo);
}
/* join all threads */

View File

@ -18,6 +18,7 @@
#ifndef __UTIL_H
#define __UTIL_H
/****************************************************************************/
/* memory */
@ -221,5 +222,34 @@ int lock_lock(const char* file);
*/
int lock_unlock(int f);
/****************************************************************************/
/* bitvect */
typedef unsigned char bit_vect_t;
#define BIT_VECT_SIZE (sizeof(bit_vect_t) * 8)
static inline size_t bit_vect_size(size_t max)
{
return (max + BIT_VECT_SIZE - 1) / BIT_VECT_SIZE;
}
static inline void bit_vect_set(bit_vect_t* bit_vect, size_t off)
{
bit_vect_t mask = 1 << (off % BIT_VECT_SIZE);
bit_vect[off / BIT_VECT_SIZE] |= mask;
}
static inline void bit_vect_clear(bit_vect_t* bit_vect, size_t off)
{
bit_vect_t mask = 1 << (off % BIT_VECT_SIZE);
bit_vect[off / BIT_VECT_SIZE] &= ~mask;
}
static inline int bit_vect_test(bit_vect_t* bit_vect, size_t off)
{
bit_vect_t mask = 1 << (off % BIT_VECT_SIZE);
return (bit_vect[off / BIT_VECT_SIZE] & mask) != 0;
}
#endif

20
configure vendored
View File

@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for snapraid 11.5.
# Generated by GNU Autoconf 2.69 for snapraid 11.6.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='snapraid'
PACKAGE_TARNAME='snapraid'
PACKAGE_VERSION='11.5'
PACKAGE_STRING='snapraid 11.5'
PACKAGE_VERSION='11.6'
PACKAGE_STRING='snapraid 11.6'
PACKAGE_BUGREPORT=''
PACKAGE_URL='http://www.snapraid.it'
@ -1304,7 +1304,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures snapraid 11.5 to adapt to many kinds of systems.
\`configure' configures snapraid 11.6 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1374,7 +1374,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of snapraid 11.5:";;
short | recursive ) echo "Configuration of snapraid 11.6:";;
esac
cat <<\_ACEOF
@ -1496,7 +1496,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
snapraid configure 11.5
snapraid configure 11.6
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@ -2102,7 +2102,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by snapraid $as_me 11.5, which was
It was created by snapraid $as_me 11.6, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@ -2965,7 +2965,7 @@ fi
# Define the identity of the package.
PACKAGE='snapraid'
VERSION='11.5'
VERSION='11.6'
cat >>confdefs.h <<_ACEOF
@ -7339,7 +7339,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by snapraid $as_me 11.5, which was
This file was extended by snapraid $as_me 11.6, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@ -7402,7 +7402,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
snapraid config.status 11.5
snapraid config.status 11.6
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@ -1041,9 +1041,9 @@ Note that it cannot be used with \[dq]sync\[dq] and \[dq]scrub\[dq], because the
process the whole array.
.TP
.B \-e, \-\-filter\-error
Filters the blocks to process in \[dq]check\[dq] and \[dq]fix\[dq].
It processes only the blocks marked with silent or input/output
errors during \[dq]sync\[dq] and \[dq]scrub\[dq], and listed in \[dq]status\[dq].
Processes the files with errors in \[dq]check\[dq] and \[dq]fix\[dq].
It processes only files that have blocks marked with silent
or input/output errors during \[dq]sync\[dq] and \[dq]scrub\[dq], and listed in \[dq]status\[dq].
This option can be used only with \[dq]check\[dq] and \[dq]fix\[dq].
.TP
.B \-p, \-\-plan PERC|bad|new|full
@ -1152,10 +1152,10 @@ option allows to resolve them.
This option can be used only with \[dq]sync\[dq], \[dq]check\[dq] and \[dq]fix\[dq].
.TP
.B \-F, \-\-force\-full
In \[dq]sync\[dq] forces a full rebuild of the parity.
In \[dq]sync\[dq] forces a full recomputation of the parity.
This option can be used when you add a new parity level, or if
you reverted back to an old content file using more recent parity data.
Instead of recomputing the parity from scratch, this allows
Instead of recreating the parity from scratch, this allows you
to reuse the hashes present in the content file to validate data,
and to maintain data protection during the \[dq]sync\[dq] process using
the parity data you have.
@ -1166,9 +1166,10 @@ In \[dq]sync\[dq] forces a full reallocation of files and rebuild of the parity.
This option can be used to completely reallocate all the files
removing the fragmentation, but reusing the hashes present in the content
file to validate data.
Compared to \-F, \-\-force\-full, this option reallocates all the parity
not having data protection during the operation.
This option can be used only with \[dq]sync\[dq].
WARNING! This option is for experts only, and it\'s highly
recommended not to use it.
You DO NOT have data protection during the \[dq]sync\[dq] operation.
.TP
.B \-l, \-\-log FILE
Write a detailed log in the specified file.

View File

@ -737,9 +737,9 @@ Options
process the whole array.
-e, --filter-error
Filters the blocks to process in "check" and "fix".
It processes only the blocks marked with silent or input/output
errors during "sync" and "scrub", and listed in "status".
Processes the files with errors in "check" and "fix".
It processes only files that have blocks marked with silent
or input/output errors during "sync" and "scrub", and listed in "status".
This option can be used only with "check" and "fix".
-p, --plan PERC|bad|new|full
@ -848,10 +848,10 @@ Options
This option can be used only with "sync", "check" and "fix".
-F, --force-full
In "sync" forces a full rebuild of the parity.
In "sync" forces a full recomputation of the parity.
This option can be used when you add a new parity level, or if
you reverted back to an old content file using more recent parity data.
Instead of recomputing the parity from scratch, this allows
Instead of recreating the parity from scratch, this allows you
to reuse the hashes present in the content file to validate data,
and to maintain data protection during the "sync" process using
the parity data you have.
@ -862,9 +862,10 @@ Options
This option can be used to completely reallocate all the files
removing the fragmentation, but reusing the hashes present in the content
file to validate data.
Compared to -F, --force-full, this option reallocates all the parity
not having data protection during the operation.
This option can be used only with "sync".
WARNING! This option is for experts only, and it's highly
recommended not to use it.
You DO NOT have data protection during the "sync" operation.
-l, --log FILE
Write a detailed log in the specified file.

View File

@ -794,9 +794,9 @@ SnapRAID provides the following options:
process the whole array.
-e, --filter-error
Filters the blocks to process in "check" and "fix".
It processes only the blocks marked with silent or input/output
errors during "sync" and "scrub", and listed in "status".
Processes the files with errors in "check" and "fix".
It processes only files that have blocks marked with silent
or input/output errors during "sync" and "scrub", and listed in "status".
This option can be used only with "check" and "fix".
-p, --plan PERC|bad|new|full
@ -905,10 +905,10 @@ SnapRAID provides the following options:
This option can be used only with "sync", "check" and "fix".
-F, --force-full
In "sync" forces a full rebuild of the parity.
In "sync" forces a full recomputation of the parity.
This option can be used when you add a new parity level, or if
you reverted back to an old content file using more recent parity data.
Instead of recomputing the parity from scratch, this allows
Instead of recreating the parity from scratch, this allows you
to reuse the hashes present in the content file to validate data,
and to maintain data protection during the "sync" process using
the parity data you have.
@ -919,9 +919,10 @@ SnapRAID provides the following options:
This option can be used to completely reallocate all the files
removing the fragmentation, but reusing the hashes present in the content
file to validate data.
Compared to -F, --force-full, this option reallocates all the parity
not having data protection during the operation.
This option can be used only with "sync".
WARNING! This option is for experts only, and it's highly
recommended not to use it.
You DO NOT have data protection during the "sync" operation.
-l, --log FILE
Write a detailed log in the specified file.