Imported Upstream version 0.9.1

Commit 884f5414c2 by Mario Fetka, 2017-03-24 11:42:13 +01:00
31 changed files with 5831 additions and 0 deletions

71
CMakeLists.txt Normal file

@ -0,0 +1,71 @@
cmake_minimum_required(VERSION 2.6)
project(mydumper)
set(VERSION 0.9.1)
set(ARCHIVE_NAME "${CMAKE_PROJECT_NAME}-${VERSION}")
#Required packages
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
find_package(MySQL)
find_package(ZLIB)
find_package(GLIB2)
find_package(PCRE)
option(BUILD_DOCS "Build the documentation" ON)
if (BUILD_DOCS)
add_subdirectory(docs)
endif (BUILD_DOCS)
option(WITH_BINLOG "Build binlog dump options" OFF)
set(CMAKE_C_FLAGS "-Wall -Wno-deprecated-declarations -Wunused -Wwrite-strings -Wno-strict-aliasing -Wextra -Wshadow -Werror -O3 -g ${MYSQL_CFLAGS}")
include_directories(${MYDUMPER_SOURCE_DIR} ${MYSQL_INCLUDE_DIR} ${GLIB2_INCLUDE_DIR} ${PCRE_INCLUDE_DIR} ${ZLIB_INCLUDE_DIRS})
if (NOT CMAKE_INSTALL_PREFIX)
SET(CMAKE_INSTALL_PREFIX "/usr/local" CACHE STRING "Install path" FORCE)
endif (NOT CMAKE_INSTALL_PREFIX)
MARK_AS_ADVANCED(CMAKE)
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/config.h)
if (WITH_BINLOG)
add_executable(mydumper mydumper.c binlog.c server_detect.c g_unix_signal.c)
else (WITH_BINLOG)
add_executable(mydumper mydumper.c server_detect.c g_unix_signal.c)
endif (WITH_BINLOG)
target_link_libraries(mydumper ${MYSQL_LIBRARIES} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES})
add_executable(myloader myloader.c)
target_link_libraries(myloader ${MYSQL_LIBRARIES} ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${PCRE_PCRE_LIBRARY} ${ZLIB_LIBRARIES})
INSTALL(TARGETS mydumper myloader
RUNTIME DESTINATION bin
)
add_custom_target(dist
COMMAND bzr export --root=${ARCHIVE_NAME}
${CMAKE_BINARY_DIR}/${ARCHIVE_NAME}.tar.gz
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
OPTION(RUN_CPPCHECK "Run cppcheck" OFF)
IF(RUN_CPPCHECK)
include(CppcheckTargets)
add_cppcheck(mydumper)
add_cppcheck(myloader)
ENDIF(RUN_CPPCHECK)
MESSAGE(STATUS "------------------------------------------------")
MESSAGE(STATUS "MYSQL_CONFIG = ${MYSQL_CONFIG}")
MESSAGE(STATUS "CMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}")
MESSAGE(STATUS "BUILD_DOCS = ${BUILD_DOCS}")
MESSAGE(STATUS "WITH_BINLOG = ${WITH_BINLOG}")
MESSAGE(STATUS "RUN_CPPCHECK = ${RUN_CPPCHECK}")
MESSAGE(STATUS "Change a values with: cmake -D<Variable>=<Value>")
MESSAGE(STATUS "------------------------------------------------")
MESSAGE(STATUS)

48
README Normal file

@ -0,0 +1,48 @@
== What is mydumper? Why? ==
* Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall)
* Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data)
* Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc
* Manageability - supports PCRE for specifying database and tables inclusions and exclusions
== How to build it? ==
Run:
cmake .
make
One needs to install development versions of the required libraries (MySQL, GLib, ZLib, PCRE):
NOTE: you must use the MySQL development package that corresponds to your server version.
* Ubuntu or Debian: apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev libssl-dev
* Fedora, RedHat and CentOS: yum install glib2-devel mysql-devel zlib-devel pcre-devel openssl-devel
* openSUSE: zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel
* MacOSX: port install glib2 mysql5 pcre pkgconfig cmake
(You may want to run 'port select mysql mysql5' afterwards)
One has to make sure that pkg-config, mysql_config and pcre-config are all in $PATH.
Binlog dump is disabled by default; to compile with it, add -DWITH_BINLOG=ON to the cmake options.
== How does consistent snapshot work? ==
This is all done following best MySQL practices and traditions:
* As a precaution, slow running queries on the server either abort the dump, or get killed
* Global write lock is acquired ("FLUSH TABLES WITH READ LOCK")
* Various metadata is read ("SHOW SLAVE STATUS","SHOW MASTER STATUS")
* Other threads connect and establish snapshots ("START TRANSACTION WITH CONSISTENT SNAPSHOT")
** On pre-4.1.8 servers it creates a dummy InnoDB table and reads from it.
* Once all worker threads announce the snapshot establishment, master executes "UNLOCK TABLES" and starts queueing jobs.
For now this does not provide consistent snapshots for non-transactional engines - support for that is expected in 0.2 :)
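As a rough illustration of that sequence (this is not mydumper's own code - the real implementation lives in mydumper.c and also coordinates the worker threads; host and credentials below are placeholders), the same steps look like this with the plain MySQL C API:

#include <mysql.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch only: the consistent-snapshot handshake described
 * above, reduced to one "master" and one "worker" connection. */
static void run(MYSQL *conn, const char *sql) {
  if (mysql_query(conn, sql)) {
    fprintf(stderr, "'%s' failed: %s\n", sql, mysql_error(conn));
    exit(EXIT_FAILURE);
  }
}

int main(void) {
  MYSQL *master = mysql_init(NULL);
  MYSQL *worker = mysql_init(NULL);
  if (!mysql_real_connect(master, "localhost", "root", "", NULL, 3306, NULL, 0) ||
      !mysql_real_connect(worker, "localhost", "root", "", NULL, 3306, NULL, 0)) {
    fprintf(stderr, "connect failed\n");
    return EXIT_FAILURE;
  }
  run(master, "FLUSH TABLES WITH READ LOCK");      /* global read lock */
  run(master, "SHOW MASTER STATUS");               /* record binlog coordinates */
  MYSQL_RES *res = mysql_store_result(master);
  if (res) mysql_free_result(res);
  run(worker, "START TRANSACTION WITH CONSISTENT SNAPSHOT");  /* one per worker */
  /* ... once every worker has announced its snapshot: */
  run(master, "UNLOCK TABLES");
  /* workers now dump table data from inside their transactions */
  mysql_close(worker);
  mysql_close(master);
  return EXIT_SUCCESS;
}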
== How to exclude (or include) databases? ==
One can use the --regex functionality, for example to avoid dumping the mysql and test databases:
mydumper --regex '^(?!(mysql|test))'
Of course, regex functionality can be used to describe pretty much any list of tables.
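The pattern is applied to names of the form 'database.table'. As an illustration only (mydumper's actual filter code is in mydumper.c, which is not reproduced in this changeset), the PCRE calls involved look roughly like this:

#include <pcre.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const char *err;
  int erroffset;
  int ovector[9];
  /* Same pattern as the example above: skip the mysql and test databases */
  pcre *re = pcre_compile("^(?!(mysql|test))", 0, &err, &erroffset, NULL);
  if (!re) {
    fprintf(stderr, "pcre_compile failed at offset %d: %s\n", erroffset, err);
    return 1;
  }
  const char *names[] = { "mysql.user", "test.t1", "sakila.actor" };
  for (int i = 0; i < 3; i++) {
    int rc = pcre_exec(re, NULL, names[i], (int)strlen(names[i]), 0, 0, ovector, 9);
    printf("%-15s %s\n", names[i], rc >= 0 ? "dumped" : "skipped");
  }
  pcre_free(re);
  return 0;
}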

284
binlog.c Normal file

@ -0,0 +1,284 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Domas Mituzas, Facebook ( domas at fb dot com )
Mark Leith, Oracle Corporation (mark dot leith at oracle dot com)
Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#include <glib.h>
#include <glib/gstdio.h>
#include <my_global.h>
#include <mysql.h>
#include <my_sys.h>
#include <mysqld_error.h>
#include <sql_common.h>
#include <string.h>
#include <zlib.h>
#include "mydumper.h"
#include "binlog.h"
#define BINLOG_MAGIC "\xfe\x62\x69\x6e"
#define EVENT_HEADER_LENGTH 19
#define EVENT_ROTATE_FIXED_LENGTH 8
enum event_positions {
EVENT_TIMESTAMP_POSITION= 0,
EVENT_TYPE_POSITION= 4,
EVENT_SERVERID_POSITION= 5,
EVENT_LENGTH_POSITION= 9,
EVENT_NEXT_POSITION= 13,
EVENT_FLAGS_POSITION= 17,
EVENT_EXTRA_FLAGS_POSITION= 19 // currently unused in v4 binlogs, but a good marker for end of header
};
enum event_type {
ROTATE_EVENT= 4,
FORMAT_DESCRIPTION_EVENT= 15,
EVENT_TOO_SHORT= 254 // arbitrary high number, in 5.1 the max event type number is 27 so this should be fine for a while
};
extern int compress_output;
extern gboolean daemon_mode;
extern gboolean shutdown_triggered;
FILE *new_binlog_file(char *binlog_file, const char *binlog_dir);
void close_binlog_file(FILE *outfile);
char *rotate_file_name(const char *buf);
void get_binlogs(MYSQL *conn, struct configuration *conf) {
// TODO: find logs we already have, use start position based on position of last log.
MYSQL_RES *result;
MYSQL_ROW row;
char* last_filename = NULL;
guint64 last_position;
// Only snapshot dump the binlogs once in daemon mode
static gboolean got_binlogs= FALSE;
if (got_binlogs)
return;
else
got_binlogs= TRUE;
if (mysql_query(conn, "SHOW MASTER STATUS")) {
g_critical("Error: Could not execute query: %s", mysql_error(conn));
return;
}
result = mysql_store_result(conn);
if ((row = mysql_fetch_row(result))) {
last_filename= g_strdup(row[0]);
last_position= strtoll(row[1], NULL, 10);
} else {
g_critical("Error: Could not obtain binary log stop position");
if (last_filename != NULL)
g_free(last_filename);
return;
}
mysql_free_result(result);
if (mysql_query(conn, "SHOW BINARY LOGS")) {
g_critical("Error: Could not execute query: %s", mysql_error(conn));
if (last_filename != NULL)
g_free(last_filename);
return;
}
result = mysql_store_result(conn);
while ((row = mysql_fetch_row(result))) {
struct job *j = g_new0(struct job,1);
struct binlog_job *bj = g_new0(struct binlog_job,1);
j->job_data=(void*) bj;
bj->filename=g_strdup(row[0]);
bj->start_position=4;
bj->stop_position= (!strcasecmp(row[0], last_filename)) ? last_position : 0;
j->conf=conf;
j->type=JOB_BINLOG;
g_async_queue_push(conf->queue,j);
}
mysql_free_result(result);
if (last_filename != NULL)
g_free(last_filename);
}
void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous) {
// set serverID = max serverID - threadID to try and eliminate conflicts,
// 0 is bad because mysqld will disconnect at the end of the last log
// dupes aren't too bad since it is up to the client to check for them
uchar buf[128];
// We need to read the raw network packets
NET* net;
net= &conn->net;
unsigned long len;
FILE* outfile;
guint32 event_type;
gboolean read_error= FALSE;
gboolean read_end= FALSE;
gboolean rotated= FALSE;
guint32 server_id= G_MAXUINT32 - mysql_thread_id(conn);
guint64 pos_counter= 0;
int4store(buf, (guint32)start_position);
// Binlog flags (2 byte int)
int2store(buf + 4, 0);
// ServerID
int4store(buf + 6, server_id);
memcpy(buf + 10, binlog_file, strlen(binlog_file));
#if MYSQL_VERSION_ID < 50100
if (simple_command(conn, COM_BINLOG_DUMP, (const char *)buf,
#else
if (simple_command(conn, COM_BINLOG_DUMP, buf,
#endif
strlen(binlog_file) + 10, 1)) {
g_critical("Error: binlog: Critical error whilst requesting binary log");
}
while(1) {
outfile= new_binlog_file(binlog_file, binlog_directory);
if (outfile == NULL) {
g_critical("Error: binlog: Could not create binlog file '%s', %d", binlog_file, errno);
return;
}
write_binlog(outfile, BINLOG_MAGIC, 4);
while(1) {
len = 0;
if (net->vio != 0) len=my_net_read(net);
if ((len == 0) || (len == ~(unsigned long) 0)) {
// Net timeout (set to 1 second)
if (mysql_errno(conn) == ER_NET_READ_INTERRUPTED) {
if (shutdown_triggered) {
close_binlog_file(outfile);
return;
} else {
continue;
}
// A real error
} else {
g_critical("Error: binlog: Network packet read error getting binlog file: %s", binlog_file);
close_binlog_file(outfile);
return;
}
}
if (len < 8 && net->read_pos[0]) {
// end of data
break;
}
pos_counter += len;
event_type= get_event((const char*)net->read_pos + 1, len -1);
switch (event_type) {
case EVENT_TOO_SHORT:
g_critical("Error: binlog: Event too short in binlog file: %s", binlog_file);
read_error= TRUE;
break;
case ROTATE_EVENT:
if (rotated) {
read_end= TRUE;
} else {
len= 1;
rotated= TRUE;
}
break;
default:
// if we get this far this is a normal event to record
break;
}
if (read_error) break;
write_binlog(outfile, (const char*)net->read_pos + 1, len - 1);
if (read_end) {
if (!continuous) {
break;
} else {
g_free(binlog_file);
binlog_file= rotate_file_name((const char*)net->read_pos + 1);
break;
}
}
// stop if we are at requested end of last log
if ((stop_position > 0) && (pos_counter >= stop_position)) break;
}
close_binlog_file(outfile);
if ((!continuous) || (!read_end)) break;
if (continuous && read_end) {
read_end= FALSE;
rotated= FALSE;
}
}
}
char *rotate_file_name(const char *buf) {
guint32 event_length= 0;
// event length is 4 bytes at position 9
event_length= uint4korr(&buf[EVENT_LENGTH_POSITION]);
// event length includes the header, plus a rotate event has a fixed 8byte part we don't need
event_length= event_length - EVENT_HEADER_LENGTH - EVENT_ROTATE_FIXED_LENGTH;
return g_strndup(&buf[EVENT_HEADER_LENGTH + EVENT_ROTATE_FIXED_LENGTH], event_length);
}
FILE *new_binlog_file(char *binlog_file, const char *binlog_dir) {
FILE *outfile;
char* filename;
if (!compress_output) {
filename= g_strdup_printf("%s/%s", binlog_dir, binlog_file);
outfile= g_fopen(filename, "w");
} else {
filename= g_strdup_printf("%s/%s.gz", binlog_dir, binlog_file);
outfile= (void*) gzopen(filename, "w");
}
g_free(filename);
return outfile;
}
void close_binlog_file(FILE *outfile) {
if (!compress_output)
fclose(outfile);
else
gzclose((gzFile) outfile);
}
unsigned int get_event(const char *buf, unsigned int len) {
if (len < EVENT_TYPE_POSITION)
return EVENT_TOO_SHORT;
return buf[EVENT_TYPE_POSITION];
// TODO: Would be good if we can check for valid event type, unfortunately this check can change from version to version
}
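/* Illustrative helper only, not called anywhere in mydumper: decodes the
 * full 19-byte v4 event header whose field offsets are listed in the enum
 * near the top of this file. uint4korr()/uint2korr() are the little-endian
 * readers provided by the MySQL headers already included here. */
struct binlog_event_header {
  guint32 timestamp;
  guchar type;
  guint32 server_id;
  guint32 event_length;
  guint32 next_position;
  guint16 flags;
};
static G_GNUC_UNUSED void decode_event_header(const char *buf, struct binlog_event_header *h) {
  h->timestamp= uint4korr(&buf[EVENT_TIMESTAMP_POSITION]);
  h->type= (guchar) buf[EVENT_TYPE_POSITION];
  h->server_id= uint4korr(&buf[EVENT_SERVERID_POSITION]);
  h->event_length= uint4korr(&buf[EVENT_LENGTH_POSITION]);
  h->next_position= uint4korr(&buf[EVENT_NEXT_POSITION]);
  h->flags= uint2korr(&buf[EVENT_FLAGS_POSITION]);
}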
void write_binlog(FILE* file, const char* data, guint64 len) {
int err;
if (len > 0) {
int write_result;
if (!compress_output)
write_result= write(fileno(file), data, len);
else
write_result= gzwrite((gzFile)file, data, len);
if (write_result <= 0) {
if (!compress_output)
g_critical("Error: binlog: Error writing binary log: %s", strerror(errno));
else
g_critical("Error: binlog: Error writing compressed binary log: %s", gzerror((gzFile)file, &err));
}
}
}

30
binlog.h Normal file

@ -0,0 +1,30 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Domas Mituzas, Facebook ( domas at fb dot com )
Mark Leith, Oracle Corporation (mark dot leith at oracle dot com)
Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#ifndef _binlog_h
#define _binlog_h
#include "mydumper.h"
void get_binlogs(MYSQL *conn, struct configuration *conf);
void get_binlog_file(MYSQL *conn, char *binlog_file, const char *binlog_directory, guint64 start_position, guint64 stop_position, gboolean continuous);
unsigned int get_event(const char *buf, unsigned int len);
void write_binlog(FILE* file, const char* data, guint64 len);
#endif

214
cmake/modules/CppcheckTargets.cmake Normal file

@ -0,0 +1,214 @@
# - Run cppcheck on c++ source files as a custom target and a test
#
# include(CppcheckTargets)
# add_cppcheck(<target-name> [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) -
# Create a target to check a target's sources with cppcheck and the indicated options
# add_cppcheck_sources(<target-name> [UNUSED_FUNCTIONS] [STYLE] [POSSIBLE_ERROR] [FAIL_ON_WARNINGS]) -
# Create a target to check standalone sources with cppcheck and the indicated options
#
# Requires these CMake modules:
# Findcppcheck
#
# Requires CMake 2.6 or newer (uses the 'function' command)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
if(__add_cppcheck)
return()
endif()
set(__add_cppcheck YES)
if(NOT CPPCHECK_FOUND)
find_package(cppcheck QUIET)
endif()
if(CPPCHECK_FOUND)
if(NOT TARGET all_cppcheck)
add_custom_target(all_cppcheck)
set_target_properties(all_cppcheck PROPERTIES EXCLUDE_FROM_ALL TRUE)
endif()
endif()
function(add_cppcheck_sources _targetname)
if(CPPCHECK_FOUND)
set(_cppcheck_args)
set(_input ${ARGN})
list(FIND _input UNUSED_FUNCTIONS _unused_func)
if("${_unused_func}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG})
list(REMOVE_AT _input ${_unused_func})
endif()
list(FIND _input STYLE _style)
if("${_style}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG})
list(REMOVE_AT _input ${_style})
endif()
list(FIND _input POSSIBLE_ERROR _poss_err)
if("${_poss_err}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG})
list(REMOVE_AT _input ${_poss_err})
endif()
list(FIND _input FAIL_ON_WARNINGS _fail_on_warn)
if("${_fail_on_warn}" GREATER "-1")
list(APPEND
CPPCHECK_FAIL_REGULAR_EXPRESSION
${CPPCHECK_WARN_REGULAR_EXPRESSION})
list(REMOVE_AT _input ${_fail_on_warn})
endif()
set(_files)
foreach(_source ${_input})
get_source_file_property(_cppcheck_loc "${_source}" LOCATION)
if(_cppcheck_loc)
# This file has a source file property, carry on.
get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE)
if("${_cppcheck_lang}" MATCHES "C")
list(APPEND _files "${_cppcheck_loc}")
endif()
else()
# This file doesn't have source file properties - figure it out.
get_filename_component(_cppcheck_loc "${_source}" ABSOLUTE)
if(EXISTS "${_cppcheck_loc}")
list(APPEND _files "${_cppcheck_loc}")
else()
message(FATAL_ERROR
"Adding CPPCHECK for file target ${_targetname}: "
"File ${_source} does not exist or needs a corrected path location "
"since we think its absolute path is ${_cppcheck_loc}")
endif()
endif()
endforeach()
if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0")
# Older than CMake 2.8.0
add_test(${_targetname}_cppcheck_test
"${CPPCHECK_EXECUTABLE}"
${CPPCHECK_TEMPLATE_ARG}
${_cppcheck_args}
${_files})
else()
# CMake 2.8.0 and newer
add_test(NAME
${_targetname}_cppcheck_test
COMMAND
"${CPPCHECK_EXECUTABLE}"
${CPPCHECK_TEMPLATE_ARG}
${_cppcheck_args}
${_files})
endif()
set_tests_properties(${_targetname}_cppcheck_test
PROPERTIES
FAIL_REGULAR_EXPRESSION
"${CPPCHECK_FAIL_REGULAR_EXPRESSION}")
add_custom_command(TARGET
all_cppcheck
PRE_BUILD
COMMAND
${CPPCHECK_EXECUTABLE}
${CPPCHECK_QUIET_ARG}
${CPPCHECK_TEMPLATE_ARG}
${_cppcheck_args}
${_files}
WORKING_DIRECTORY
"${CMAKE_CURRENT_SOURCE_DIR}"
COMMENT
"${_targetname}_cppcheck: Running cppcheck on target ${_targetname}..."
VERBATIM)
endif()
endfunction()
function(add_cppcheck _name)
if(NOT TARGET ${_name})
message(FATAL_ERROR
"add_cppcheck given a target name that does not exist: '${_name}' !")
endif()
if(CPPCHECK_FOUND)
set(_cppcheck_args)
list(FIND ARGN UNUSED_FUNCTIONS _unused_func)
if("${_unused_func}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_UNUSEDFUNC_ARG})
endif()
list(FIND ARGN STYLE _style)
if("${_style}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_STYLE_ARG})
endif()
list(FIND ARGN POSSIBLE_ERROR _poss_err)
if("${_poss_err}" GREATER "-1")
list(APPEND _cppcheck_args ${CPPCHECK_POSSIBLEERROR_ARG})
endif()
# search ARGN directly; there is no _input list inside this function
list(FIND ARGN FAIL_ON_WARNINGS _fail_on_warn)
if("${_fail_on_warn}" GREATER "-1")
list(APPEND
CPPCHECK_FAIL_REGULAR_EXPRESSION
${CPPCHECK_WARN_REGULAR_EXPRESSION})
endif()
get_target_property(_cppcheck_sources "${_name}" SOURCES)
set(_files)
foreach(_source ${_cppcheck_sources})
get_source_file_property(_cppcheck_lang "${_source}" LANGUAGE)
get_source_file_property(_cppcheck_loc "${_source}" LOCATION)
if("${_cppcheck_lang}" MATCHES "C")
list(APPEND _files "${_cppcheck_loc}")
endif()
endforeach()
if("1.${CMAKE_VERSION}" VERSION_LESS "1.2.8.0")
# Older than CMake 2.8.0
add_test(${_name}_cppcheck_test
"${CPPCHECK_EXECUTABLE}"
${CPPCHECK_TEMPLATE_ARG}
${_cppcheck_args}
${_files})
else()
# CMake 2.8.0 and newer
add_test(NAME
${_name}_cppcheck_test
COMMAND
"${CPPCHECK_EXECUTABLE}"
${CPPCHECK_TEMPLATE_ARG}
${_cppcheck_args}
${_files})
endif()
set_tests_properties(${_name}_cppcheck_test
PROPERTIES
FAIL_REGULAR_EXPRESSION
"${CPPCHECK_FAIL_REGULAR_EXPRESSION}")
add_custom_command(TARGET
all_cppcheck
PRE_BUILD
COMMAND
${CPPCHECK_EXECUTABLE}
${CPPCHECK_QUIET_ARG}
${CPPCHECK_TEMPLATE_ARG}
"--enable=style,information,unusedFunction"
${_cppcheck_args}
${_files}
WORKING_DIRECTORY
"${CMAKE_CURRENT_SOURCE_DIR}"
COMMENT
"${_name}_cppcheck: Running cppcheck on target ${_name}..."
VERBATIM)
endif()
endfunction()

22
cmake/modules/FindGLIB2.cmake Normal file

@ -0,0 +1,22 @@
# - Try to find the GLIB2 libraries
if(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES)
# Already in cache, be silent
set(GLIB2_FIND_QUIETLY TRUE)
endif(GLIB2_INCLUDE_DIR AND GLIB2_LIBRARIES AND GTHREAD2_LIBRARIES)
if (NOT WIN32)
include(FindPkgConfig)
pkg_search_module(PC_GLIB2 REQUIRED glib-2.0)
pkg_search_module(PC_GTHREAD2 REQUIRED gthread-2.0)
endif(NOT WIN32)
set(GLIB2_INCLUDE_DIR ${PC_GLIB2_INCLUDE_DIRS})
find_library(GLIB2_LIBRARIES NAMES glib-2.0 HINTS ${PC_GLIB2_LIBDIR} ${PC_GLIB2_LIBRARY_DIRS})
find_library(GTHREAD2_LIBRARIES NAMES gthread-2.0 HINTS ${PC_GTHREAD2_LIBDIR} ${PC_GTHREAD2_LIBRARY_DIRS})
mark_as_advanced(GLIB2_INCLUDE_DIR GLIB2_LIBRARIES GTHREAD2_LIBRARIES)

111
cmake/modules/FindMySQL.cmake Normal file

@ -0,0 +1,111 @@
# - Find MySQL
# Find the MySQL includes and client library
# This module defines
# MYSQL_INCLUDE_DIR, where to find mysql.h
# MYSQL_LIBRARIES, the libraries needed to use MySQL.
# MYSQL_FOUND, If false, do not try to use MySQL.
#
# Copyright (c) 2006, Jaroslaw Staniek, <js@iidea.pl>
# Lots of adjustments by Michal Cihar <michal@cihar.com>
#
# vim: expandtab sw=4 ts=4 sts=4:
#
# Redistribution and use is allowed according to the terms of the BSD license.
if(UNIX)
set(MYSQL_CONFIG_PREFER_PATH "$ENV{MYSQL_HOME}/bin" CACHE FILEPATH
"preferred path to MySQL (mysql_config)")
find_program(MYSQL_CONFIG mysql_config
${MYSQL_CONFIG_PREFER_PATH}
/usr/local/mysql/bin/
/usr/local/bin/
/usr/bin/
)
if(MYSQL_CONFIG)
message(STATUS "Using mysql-config: ${MYSQL_CONFIG}")
# set CFLAGS
exec_program(${MYSQL_CONFIG}
ARGS --cflags
OUTPUT_VARIABLE MY_TMP)
set(MYSQL_CFLAGS ${MY_TMP} CACHE STRING INTERNAL)
# set INCLUDE_DIR
exec_program(${MYSQL_CONFIG}
ARGS --include
OUTPUT_VARIABLE MY_TMP)
string(REGEX REPLACE "-I([^ ]*)( .*)?" "\\1" MY_TMP "${MY_TMP}")
set(MYSQL_ADD_INCLUDE_DIR ${MY_TMP} CACHE FILEPATH INTERNAL)
# set LIBRARY_DIR
exec_program(${MYSQL_CONFIG}
ARGS --libs_r
OUTPUT_VARIABLE MY_TMP)
set(MYSQL_ADD_LIBRARIES "")
# prepend space in order to match separate words only (e.g. rather
# than "-linux" from within "-L/usr/lib/i386-linux-gnu")
string(REGEX MATCHALL " +-l[^ ]*" MYSQL_LIB_LIST " ${MY_TMP}")
foreach(MY_LIB ${MYSQL_LIB_LIST})
string(REGEX REPLACE "[ ]*-l([^ ]*)" "\\1" MY_LIB "${MY_LIB}")
list(APPEND MYSQL_ADD_LIBRARIES "${MY_LIB}")
endforeach(MY_LIB ${MYSQL_LIBS})
set(MYSQL_ADD_LIBRARY_PATH "")
string(REGEX MATCHALL " +-L[^ ]*" MYSQL_LIBDIR_LIST " ${MY_TMP}")
foreach(MY_LIB ${MYSQL_LIBDIR_LIST})
string(REGEX REPLACE "[ ]*-L([^ ]*)" "\\1" MY_LIB "${MY_LIB}")
list(APPEND MYSQL_ADD_LIBRARY_PATH "${MY_LIB}")
endforeach(MY_LIB ${MYSQL_LIBS})
else(MYSQL_CONFIG)
set(MYSQL_ADD_LIBRARIES "")
list(APPEND MYSQL_ADD_LIBRARIES "mysqlclient")
endif(MYSQL_CONFIG)
else(UNIX)
set(MYSQL_ADD_INCLUDE_DIR "c:/msys/local/include" CACHE FILEPATH INTERNAL)
set(MYSQL_ADD_LIBRARY_PATH "c:/msys/local/lib" CACHE FILEPATH INTERNAL)
ENDIF(UNIX)
find_path(MYSQL_INCLUDE_DIR mysql.h
${MYSQL_ADD_INCLUDE_DIR}
/usr/local/include
/usr/local/include/mysql
/usr/local/mysql/include
/usr/local/mysql/include/mysql
/usr/include
/usr/include/mysql
/usr/include/mysql/private
)
set(TMP_MYSQL_LIBRARIES "")
set(CMAKE_FIND_LIBRARY_SUFFIXES .so .a .lib)
foreach(MY_LIB ${MYSQL_ADD_LIBRARIES})
find_library("MYSQL_LIBRARIES_${MY_LIB}" NAMES ${MY_LIB}
HINTS
${MYSQL_ADD_LIBRARY_PATH}
/usr/lib/mysql
/usr/lib
/usr/local/lib
/usr/local/lib/mysql
/usr/local/mysql/lib
)
list(APPEND TMP_MYSQL_LIBRARIES "${MYSQL_LIBRARIES_${MY_LIB}}")
endforeach(MY_LIB ${MYSQL_ADD_LIBRARIES})
set(MYSQL_LIBRARIES ${TMP_MYSQL_LIBRARIES} CACHE FILEPATH INTERNAL)
if(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES)
set(MYSQL_FOUND TRUE CACHE INTERNAL "MySQL found")
message(STATUS "Found MySQL: ${MYSQL_INCLUDE_DIR}, ${MYSQL_LIBRARIES}")
else(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES)
set(MYSQL_FOUND FALSE CACHE INTERNAL "MySQL found")
message(STATUS "MySQL not found.")
endif(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARIES)
mark_as_advanced(MYSQL_INCLUDE_DIR MYSQL_LIBRARIES MYSQL_CFLAGS)

45
cmake/modules/FindPCRE.cmake Normal file

@ -0,0 +1,45 @@
# - Try to find the PCRE regular expression library
# Once done this will define
#
# PCRE_FOUND - system has the PCRE library
# PCRE_INCLUDE_DIR - the PCRE include directory
# PCRE_LIBRARIES - The libraries needed to use PCRE
# Copyright (c) 2006, Alexander Neundorf, <neundorf@kde.org>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
if (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY)
# Already in cache, be silent
set(PCRE_FIND_QUIETLY TRUE)
endif (PCRE_INCLUDE_DIR AND PCRE_PCREPOSIX_LIBRARY AND PCRE_PCRE_LIBRARY)
if (NOT WIN32)
# use pkg-config to get the directories and then use these values
# in the FIND_PATH() and FIND_LIBRARY() calls
find_package(PkgConfig)
pkg_check_modules(PC_PCRE REQUIRED libpcre)
set(PCRE_DEFINITIONS ${PC_PCRE_CFLAGS_OTHER})
endif (NOT WIN32)
find_path(PCRE_INCLUDE_DIR pcre.h
HINTS ${PC_PCRE_INCLUDEDIR} ${PC_PCRE_INCLUDE_DIRS}
PATH_SUFFIXES pcre)
find_library(PCRE_PCRE_LIBRARY NAMES pcre HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS})
find_library(PCRE_PCREPOSIX_LIBRARY NAMES pcreposix HINTS ${PC_PCRE_LIBDIR} ${PC_PCRE_LIBRARY_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(PCRE DEFAULT_MSG PCRE_INCLUDE_DIR PCRE_PCRE_LIBRARY PCRE_PCREPOSIX_LIBRARY )
set(PCRE_LIBRARIES ${PCRE_PCRE_LIBRARY} ${PCRE_PCREPOSIX_LIBRARY})
mark_as_advanced(PCRE_INCLUDE_DIR PCRE_LIBRARIES PCRE_PCREPOSIX_LIBRARY PCRE_PCRE_LIBRARY)

57
cmake/modules/FindSphinx.cmake Normal file

@ -0,0 +1,57 @@
# - This module looks for Sphinx
# Find the Sphinx documentation generator
#
# This modules defines
# SPHINX_EXECUTABLE
# SPHINX_FOUND
# SPHINX_MAJOR_VERSION
# SPHINX_MINOR_VERSION
# SPHINX_VERSION
#=============================================================================
# Copyright 2002-2009 Kitware, Inc.
# Copyright 2009-2011 Peter Colberg
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file COPYING-CMAKE-SCRIPTS for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distribute this file outside of CMake, substitute the full
# License text for the above reference.)
find_program(SPHINX_EXECUTABLE NAMES sphinx-build
HINTS
$ENV{SPHINX_DIR}
PATH_SUFFIXES bin
DOC "Sphinx documentation generator"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Sphinx DEFAULT_MSG
SPHINX_EXECUTABLE
)
if (SPHINX_EXECUTABLE)
execute_process (
COMMAND "${SPHINX_EXECUTABLE}" -h
OUTPUT_VARIABLE _SPHINX_VERSION_OUTPUT
ERROR_VARIABLE _SPHINX_VERSION_OUTPUT
)
if (_SPHINX_VERSION_OUTPUT MATCHES "Sphinx v([0-9]+\\.[0-9]+\\.[0-9]+)")
set (SPHINX_VERSION "${CMAKE_MATCH_1}")
string (REPLACE "." ";" _SPHINX_VERSION_LIST "${SPHINX_VERSION}")
list (GET _SPHINX_VERSION_LIST 0 SPHINX_MAJOR_VERSION)
list (GET _SPHINX_VERSION_LIST 1 SPHINX_MINOR_VERSION)
# patch version meh :)
endif()
endif()
message("${SPHINX_MAJOR_VERSION}")
mark_as_advanced(
SPHINX_EXECUTABLE
)

142
cmake/modules/Findcppcheck.cmake Normal file

@ -0,0 +1,142 @@
# - try to find cppcheck tool
#
# Cache Variables:
# CPPCHECK_EXECUTABLE
#
# Non-cache variables you might use in your CMakeLists.txt:
# CPPCHECK_FOUND
# CPPCHECK_POSSIBLEERROR_ARG
# CPPCHECK_UNUSEDFUNC_ARG
# CPPCHECK_STYLE_ARG
# CPPCHECK_QUIET_ARG
# CPPCHECK_INCLUDEPATH_ARG
# CPPCHECK_FAIL_REGULAR_EXPRESSION
# CPPCHECK_WARN_REGULAR_EXPRESSION
# CPPCHECK_MARK_AS_ADVANCED - whether to mark our vars as advanced even
# if we don't find this program.
#
# Requires these CMake modules:
# FindPackageHandleStandardArgs (known included with CMake >=2.6.2)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
file(TO_CMAKE_PATH "${CPPCHECK_ROOT_DIR}" CPPCHECK_ROOT_DIR)
set(CPPCHECK_ROOT_DIR
"${CPPCHECK_ROOT_DIR}"
CACHE
PATH
"Path to search for cppcheck")
# cppcheck app bundles on Mac OS X are GUI, we want command line only
set(_oldappbundlesetting ${CMAKE_FIND_APPBUNDLE})
set(CMAKE_FIND_APPBUNDLE NEVER)
# If we have a custom path, look there first.
if(CPPCHECK_ROOT_DIR)
find_program(CPPCHECK_EXECUTABLE
NAMES
cppcheck
cli
PATHS
"${CPPCHECK_ROOT_DIR}"
PATH_SUFFIXES
cli
NO_DEFAULT_PATH)
endif()
find_program(CPPCHECK_EXECUTABLE NAMES cppcheck)
# Restore original setting for appbundle finding
set(CMAKE_FIND_APPBUNDLE ${_oldappbundlesetting})
if(CPPCHECK_EXECUTABLE)
# Find out where our test file is
get_filename_component(_cppcheckmoddir ${CMAKE_CURRENT_LIST_FILE} PATH)
set(_cppcheckdummyfile "${_cppcheckmoddir}/Findcppcheck.cpp")
# Check for the two types of command line arguments by just trying them
execute_process(COMMAND
"${CPPCHECK_EXECUTABLE}"
"--enable=style"
"--quiet"
"${_cppcheckdummyfile}"
RESULT_VARIABLE
_cppcheck_new_result
OUTPUT_QUIET
ERROR_QUIET)
execute_process(COMMAND
"${CPPCHECK_EXECUTABLE}"
"--style"
"--quiet"
"${_cppcheckdummyfile}"
RESULT_VARIABLE
_cppcheck_old_result
OUTPUT_QUIET
ERROR_QUIET)
if("${_cppcheck_new_result}" EQUAL 0)
# New arguments
set(CPPCHECK_UNUSEDFUNC_ARG "--enable=unusedFunctions")
set(CPPCHECK_POSSIBLEERROR_ARG "--enable=possibleError")
set(CPPCHECK_STYLE_ARG "--enable=style")
set(CPPCHECK_QUIET_ARG "--quiet")
set(CPPCHECK_INCLUDEPATH_ARG "-I")
if(MSVC)
set(CPPCHECK_TEMPLATE_ARG --template vs)
set(CPPCHECK_FAIL_REGULAR_EXPRESSION "[(]error[)]")
set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]")
elseif(CMAKE_COMPILER_IS_GNUCXX)
set(CPPCHECK_TEMPLATE_ARG --template gcc)
set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ")
set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ")
else()
message(STATUS
"Warning: FindCppcheck doesn't know how to format error messages for your compiler!")
set(CPPCHECK_TEMPLATE_ARG --template gcc)
set(CPPCHECK_FAIL_REGULAR_EXPRESSION " error: ")
set(CPPCHECK_WARN_REGULAR_EXPRESSION " style: ")
endif()
elseif("${_cppcheck_old_result}" EQUAL 0)
# Old arguments
set(CPPCHECK_UNUSEDFUNC_ARG "--unused-functions")
set(CPPCHECK_POSSIBLEERROR_ARG "--all")
set(CPPCHECK_STYLE_ARG "--style")
set(CPPCHECK_QUIET_ARG "--quiet")
set(CPPCHECK_INCLUDEPATH_ARG "-I")
set(CPPCHECK_FAIL_REGULAR_EXPRESSION "error:")
set(CPPCHECK_WARN_REGULAR_EXPRESSION "[(]style[)]")
else()
# No idea - some other issue must be getting in the way
message(STATUS
"WARNING: Can't detect whether CPPCHECK wants new or old-style arguments!")
endif()
endif()
set(CPPCHECK_ALL
"${CPPCHECK_EXECUTABLE} ${CPPCHECK_POSSIBLEERROR_ARG} ${CPPCHECK_UNUSEDFUNC_ARG} ${CPPCHECK_STYLE_ARG} ${CPPCHECK_QUIET_ARG} ${CPPCHECK_INCLUDEPATH_ARG} some/include/path")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(cppcheck
DEFAULT_MSG
CPPCHECK_ALL
CPPCHECK_EXECUTABLE
CPPCHECK_POSSIBLEERROR_ARG
CPPCHECK_UNUSEDFUNC_ARG
CPPCHECK_STYLE_ARG
CPPCHECK_INCLUDEPATH_ARG
CPPCHECK_QUIET_ARG)
if(CPPCHECK_FOUND OR CPPCHECK_MARK_AS_ADVANCED)
mark_as_advanced(CPPCHECK_ROOT_DIR)
endif()
mark_as_advanced(CPPCHECK_EXECUTABLE)

16
cmake/modules/Findcppcheck.cpp Normal file

@ -0,0 +1,16 @@
/**
* \file Findcppcheck.cpp
* \brief Dummy C++ source file used by CMake module Findcppcheck.cmake
*
* \author
* Ryan Pavlik, 2009-2010
* <rpavlik@iastate.edu>
* http://academic.cleardefinition.com/
*
*/
int main(int argc, char* argv[]) {
return 0;
}

45
common.h Normal file

@ -0,0 +1,45 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#ifndef _common_h
#define _common_h
char *hostname=NULL;
char *username=NULL;
char *password=NULL;
char *socket_path=NULL;
char *db=NULL;
guint port=3306;
guint num_threads= 4;
guint verbose=2;
gboolean compress_protocol= FALSE;
gboolean program_version= FALSE;
GOptionEntry common_entries[] =
{
{ "host", 'h', 0, G_OPTION_ARG_STRING, &hostname, "The host to connect to", NULL },
{ "user", 'u', 0, G_OPTION_ARG_STRING, &username, "Username with privileges to run the dump", NULL },
{ "password", 'p', 0, G_OPTION_ARG_STRING, &password, "User password", NULL },
{ "port", 'P', 0, G_OPTION_ARG_INT, &port, "TCP/IP port to connect to", NULL },
{ "socket", 'S', 0, G_OPTION_ARG_STRING, &socket_path, "UNIX domain socket file to use for connection", NULL },
{ "threads", 't', 0, G_OPTION_ARG_INT, &num_threads, "Number of threads to use, default 4", NULL },
{ "compress-protocol", 'C', 0, G_OPTION_ARG_NONE, &compress_protocol, "Use compression on the MySQL connection", NULL },
{ "version", 'V', 0, G_OPTION_ARG_NONE, &program_version, "Show the program version and exit", NULL },
{ "verbose", 'v', 0, G_OPTION_ARG_INT, &verbose, "Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2", NULL },
{ NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }
};
#endif
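
As a minimal sketch of how this header is meant to be consumed (mydumper.c and myloader.c, which do the real parsing and add their own tool-specific entries, are not reproduced in this excerpt), a program would hand common_entries to GLib's option parser like so:

#include <glib.h>
#include "common.h"

int main(int argc, char *argv[]) {
  GError *error = NULL;
  GOptionContext *context = g_option_context_new("- example client");
  g_option_context_add_main_entries(context, common_entries, NULL);
  if (!g_option_context_parse(context, &argc, &argv, &error)) {
    g_print("option parsing failed: %s\n", error->message);
    return 1;
  }
  if (program_version) {
    g_print("example 0.9.1\n");
    return 0;
  }
  g_print("would connect to %s:%u as %s using %u threads\n",
          hostname ? hostname : "localhost", port,
          username ? username : "(current user)", num_threads);
  g_option_context_free(context);
  return 0;
}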

7
config.h.in Normal file

@ -0,0 +1,7 @@
#ifndef CONFIG_H
#define CONFIG_H
#cmakedefine VERSION "@VERSION@"
#cmakedefine WITH_BINLOG
#endif
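
The two #cmakedefine lines become plain #defines in the generated config.h once CMake substitutes them. A minimal sketch of how a consumer checks them (the real consumers, mydumper.c and myloader.c, are not reproduced in this excerpt):

#include <stdio.h>
#include "config.h"   /* generated from config.h.in by CONFIGURE_FILE() */

int main(void) {
  printf("built as version %s\n", VERSION);
#ifdef WITH_BINLOG
  printf("binlog dump support compiled in\n");
#else
  printf("binlog dump support disabled (-DWITH_BINLOG=OFF)\n");
#endif
  return 0;
}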

156
docs/CMakeLists.txt Normal file

@ -0,0 +1,156 @@
# Generate documentation in HTML and PDF format using Sphinx.
set(GENERATE_DOC TRUE)
# We use the Sphinx documentation generator to render HTML and manual
# pages from the user and reference documentation in ReST format.
find_package(Sphinx QUIET)
if(NOT SPHINX_FOUND)
message(WARNING "Unable to find Sphinx documentation generator")
set(GENERATE_DOC FALSE)
endif(NOT SPHINX_FOUND)
if(SPHINX_MAJOR_VERSION LESS 1)
message(WARNING "Sphinx is older than v1.0, not building docs")
set(GENERATE_DOC FALSE)
endif(SPHINX_MAJOR_VERSION LESS 1)
if(GENERATE_DOC)
# documentation tools
set(SOURCE_BUILD_DIR "${CMAKE_CURRENT_SOURCE_DIR}/_build")
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build")
# static ReST documentation sources
set(SOURCES_DIR "${CMAKE_CURRENT_BINARY_DIR}/_sources")
# generated ReST documentation sources
set(REF_SOURCES_DIR "${SOURCES_DIR}/reference")
# master document with modules index
set(REF_MASTER_DOC "modules")
# substitute variables in configuration and scripts
foreach(file
conf.py
sources.cmake
)
configure_file(
"${SOURCE_BUILD_DIR}/${file}.in"
"${BINARY_BUILD_DIR}/${file}"
@ONLY
)
endforeach(file)
set(CLEAN_FILES
"${BINARY_BUILD_DIR}/html"
)
add_custom_target(ALL
DEPENDS "${REF_SOURCES_DIR}/${REF_MASTER_DOC}.rst"
)
# Sphinx requires all sources in the same directory tree. As we wish
# to include generated reference documentation from the build tree, we
# copy static ReST documents to the build tree before calling Sphinx.
add_custom_target(doc_sources ALL
"${CMAKE_COMMAND}" -P "${BINARY_BUILD_DIR}/sources.cmake"
)
list(APPEND CLEAN_FILES
"${SOURCES_DIR}"
)
# note the trailing slash to exclude directory name
install(DIRECTORY "${SOURCES_DIR}/"
DESTINATION "share/doc/mydumper"
)
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html")
# This target builds HTML documentation using Sphinx.
add_custom_target(doc_html ALL
${SPHINX_EXECUTABLE}
-q -b html
-c "${BINARY_BUILD_DIR}"
-d "${SPHINX_CACHE_DIR}"
"${SOURCES_DIR}"
"${SPHINX_HTML_DIR}"
COMMENT "Building HTML documentation with Sphinx"
)
list(APPEND CLEAN_FILES
"${SPHINX_CACHE_DIR}"
"${SPHINX_HTML_DIR}"
)
add_dependencies(doc_html
doc_sources
)
install(DIRECTORY "${SPHINX_HTML_DIR}"
DESTINATION "share/doc/mydumper"
)
# HTML output directory
set(SPHINX_MAN_DIR "${CMAKE_CURRENT_BINARY_DIR}/man")
# This target builds a manual page using Sphinx.
add_custom_target(doc_man ALL
${SPHINX_EXECUTABLE}
-q -b man
-c "${BINARY_BUILD_DIR}"
-d "${SPHINX_CACHE_DIR}"
"${SOURCES_DIR}"
"${SPHINX_MAN_DIR}"
COMMENT "Building manual page with Sphinx"
)
list(APPEND CLEAN_FILES
"${SPHINX_MAN_DIR}"
)
add_dependencies(doc_man
doc_sources
)
# serialize Sphinx targets to avoid cache conflicts in parallel builds
add_dependencies(doc_man
doc_html
)
install(FILES "${SPHINX_MAN_DIR}/mydumper.1" "${SPHINX_MAN_DIR}/myloader.1"
DESTINATION "share/man/man1"
)
# This target builds PDF documentation using Sphinx and LaTeX.
if(PDFLATEX_COMPILER)
# PDF output directory
set(SPHINX_PDF_DIR "${CMAKE_CURRENT_BINARY_DIR}/pdf")
add_custom_target(doc_pdf ALL
${SPHINX_EXECUTABLE}
-q -b latex
-c "${BINARY_BUILD_DIR}"
-d "${SPHINX_CACHE_DIR}"
"${SOURCES_DIR}"
"${SPHINX_PDF_DIR}"
COMMENT "Building PDF documentation with Sphinx"
)
add_custom_command(TARGET doc_pdf POST_BUILD
COMMAND ${CMAKE_MAKE_PROGRAM} LATEXOPTS=-interaction=batchmode
WORKING_DIRECTORY "${SPHINX_PDF_DIR}"
)
list(APPEND CLEAN_FILES
"${SPHINX_PDF_DIR}"
)
add_dependencies(doc_pdf
doc_sources
)
# serialize Sphinx targets to avoid cache conflicts in parallel builds
add_dependencies(doc_pdf
doc_man
)
install(FILES "${SPHINX_PDF_DIR}/mydumper.pdf"
DESTINATION "share/doc/mydumper"
)
endif(PDFLATEX_COMPILER)
# Add output directories to clean target.
set_directory_properties(PROPERTIES
ADDITIONAL_MAKE_CLEAN_FILES "${CLEAN_FILES}"
)
endif(GENERATE_DOC)

218
docs/_build/conf.py.in vendored Normal file

@ -0,0 +1,218 @@
# -*- coding: utf-8 -*-
#
# MySQL Data Dumper documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 26 11:44:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'@PROJECT_NAME@'
copyright = u'2011, Andrew Hutchings'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '@VERSION@'
# The full version, including alpha/beta/rc tags.
release = '@VERSION@'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['@CMAKE_CURRENT_SOURCE_DIR@/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MySQLDataDumperdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MySQLDataDumper.tex', u'@PROJECT_NAME@ Documentation',
u'Andrew Hutchings', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('mydumper_usage', 'mydumper', u'@PROGRAM_DESC@',
[u'Andrew Hutchings'], 1),
('myloader_usage', 'myloader', u'@PROGRAM_DESC@',
[u'Andrew Hutchings'], 1)
]

16
docs/_build/sources.cmake.in vendored Normal file

@ -0,0 +1,16 @@
# This script recursively copies all ReST documents from the source directory to
# the binary directory. CMAKE_CURRENT_SOURCE_DIR and SOURCES_DIR are substituted
# upon the cmake stage. The script is executed upon the make stage to ensure
# that the binary sources directory is always up to date.
file(GLOB SOURCES
RELATIVE "@CMAKE_CURRENT_SOURCE_DIR@"
"@CMAKE_CURRENT_SOURCE_DIR@/*.rst"
)
foreach(source ${SOURCES})
configure_file(
"@CMAKE_CURRENT_SOURCE_DIR@/${source}"
"@SOURCES_DIR@/${source}"
COPYONLY
)
endforeach(source)

9
docs/authors.rst Normal file

@ -0,0 +1,9 @@
Authors
=======
The code for mydumper has been written by the following people:
* `Domas Mituzas <http://dom.as/>`_, Facebook ( domas at fb dot com )
* `Andrew Hutchings <http://www.linuxjedi.co.uk>`_, SkySQL ( andrew at skysql dot com )
* `Mark Leith <http://www.markleith.co.uk/>`_, Oracle Corporation ( mark dot leith at oracle dot com )
* `Max Bubenick <http://www.bube.com.ar>`_, Percona RDBA ( max dot bubenick at percona dot com )

70
docs/compiling.rst Normal file

@ -0,0 +1,70 @@
Compiling
=========
Requirements
------------
mydumper requires the following before it can be compiled:
* `CMake <http://www.cmake.org/>`_
* `Glib2 <http://www.gtk.org/index.php>`_ (with development packages)
* `PCRE <http://www.pcre.org/>`_ (with development packages)
* `MySQL <http://www.mysql.com/>`_ client libraries (with development packages)
Additionally the following packages are optional:
* `python-sphinx <http://sphinx.pocoo.org/>`_ (for documentation)
Ubuntu/Debian
^^^^^^^^^^^^^
.. code-block:: bash
apt-get install libglib2.0-dev libmysqlclient15-dev zlib1g-dev libpcre3-dev
Fedora/Redhat/CentOS
^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
yum install glib2-devel mysql-devel zlib-devel pcre-devel
OpenSUSE
^^^^^^^^
.. code-block:: bash
zypper install glib2-devel libmysqlclient-devel pcre-devel zlib-devel
Mac OSX
^^^^^^^
.. code-block:: bash
port install glib2 mysql5 pcre
CMake
-----
CMake is used for mydumper's build system and is executed as follows::
cmake .
make
You can optionally provide parameters for CMake; the possible options are:
* ``-DMYSQL_CONFIG=/path/to/mysql_config`` - The path and filename for the mysql_config executable
* ``-DCMAKE_INSTALL_PREFIX=/install/path`` - The path where mydumper should be installed
Documentation
-------------
If you wish to just compile the documentation you can do so with::
cmake .
make doc_html
or for a man page output::
cmake .
make doc_man

37
docs/examples.rst Normal file

@ -0,0 +1,37 @@
Examples
========
Simple Usage
------------
Just running :program:`mydumper` without any options will try to connect to a
server using the default socket path. It will then dump the tables from all
databases using 4 worker threads.
Regex
-----
To use :program:`mydumper`'s regex feature simply use the
:option:`--regex <mydumper --regex>` option. In the following example mydumper
will ignore the ``test`` and ``mysql`` databases::
mydumper --regex '^(?!(mysql|test))'
Restoring a dump
----------------
Mydumper now includes myloader, which is a multi-threaded restoration tool. To
use myloader with a mydumper dump you simply need to pass it the directory of
the dump along with a user capable of restoring the schemas and data. As an
example the following will restore a dump overwriting any existing tables::
myloader --directory=export-20110614-094953 --overwrite-tables --user=root
Daemon mode
-----------
Mydumper has a daemon mode which will snapshot the dump data every so often
whilst continuously retrieving the binary log files. This gives a continuous
consistent backup right up to the point where the database server fails. To use
this you simply need to use the :option:`--daemon <mydumper --daemon>` option.
In the following example mydumper will use daemon mode, creating a snapshot
every half an hour and log to an output file::
mydumper --daemon --snapshot-interval=30 --logfile=dump.log

61
docs/files.rst Normal file

@ -0,0 +1,61 @@
Output Files
============
mydumper generates several files during the generation of the dump. Many of
these are for the table data itself since every table has at least one file.
Metadata
--------
When a dump is executed a file called ``metadata.partial`` is created in the output
directory and is renamed to ``metadata`` when mydumper finishes without error.
This contains the start and end time of the dump as well as the
master binary log positions if applicable.
This is an example of the content of this file::
Started dump at: 2011-05-05 13:57:17
SHOW MASTER STATUS:
Log: linuxjedi-laptop-bin.000001
Pos: 106
Finished dump at: 2011-05-05 13:57:17
Table Data
----------
The data from every table is written into a separate file; additionally, if the
:option:`--rows <mydumper --rows>` option is used then each chunk of the table will
be in a separate file. The file names for this are in the format::
database.table.sql(.gz)
or if chunked::
database.table.chunk.sql(.gz)
Where 'chunk' is a number padded with up to 5 zeros.
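As a rough illustration (the actual filename construction happens in mydumper.c, which is not part of this page; the database and table names below are made up), a chunked data file name can be built with GLib like this::

#include <glib.h>

int main(void) {
  guint chunk = 7;
  /* "%05u" gives the zero padding described above; a ".gz" suffix is
     added only when compression is enabled. */
  gchar *name = g_strdup_printf("%s.%s.%05u.sql", "sakila", "actor", chunk);
  g_print("%s\n", name);   /* prints: sakila.actor.00007.sql */
  g_free(name);
  return 0;
}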
Table Schemas
-------------
When the :option:`--schemas <mydumper --schemas>` option is used mydumper will
create a file for the schema of every table it is writing data for. The files
for this are in the following format::
database.table-schema.sql(.gz)
Binary Logs
-----------
Binary logs are retrieved when :option:`--binlogs <mydumper --binlogs>` option
has been set. This will store them in the ``binlog_snapshot/`` sub-directory
inside the dump directory.
The binary log files have the same filenames as on the MySQL server that supplies them, and also get a .gz extension if they are compressed.
Daemon mode
-----------
Daemon mode does things a little differently. There are the directories ``0``
and ``1`` inside the dump directory. These alternate when dumping so that if
mydumper fails for any reason there is still a good snapshot. When a snapshot
dump is complete the ``last_dump`` symlink is updated to point to that dump.
If binary logging is enabled mydumper will connect as if it were a slave server
and constantly retrieve the binary logs into the ``binlogs`` subdirectory.

25
docs/index.rst Normal file

@ -0,0 +1,25 @@
.. MySQL Data Dumper documentation master file
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to MySQL Data Dumper's documentation!
=============================================
Contents:
.. toctree::
:maxdepth: 2
authors
compiling
mydumper_usage
myloader_usage
files
examples
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

193
docs/mydumper_usage.rst Normal file

@ -0,0 +1,193 @@
Mydumper Usage
==============
Synopsis
--------
:program:`mydumper` [:ref:`OPTIONS <mydumper-options-label>`]
Description
-----------
:program:`mydumper` is a tool used for backing up MySQL database servers much
faster than the mysqldump tool distributed with MySQL. It also has the
capability to retrieve the binary logs from the remote server at the same time
as the dump itself. The advantages of mydumper are:
* Parallelism (hence, speed) and performance (avoids expensive character set conversion routines, efficient code overall)
* Easier to manage output (separate files for tables, dump metadata, etc, easy to view/parse data)
* Consistency - maintains snapshot across all threads, provides accurate master and slave log positions, etc
* Manageability - supports PCRE for specifying database and tables inclusions and exclusions
.. _mydumper-options-label:
Options
-------
The :program:`mydumper` tool has several available options:
.. program:: mydumper
.. option:: --help, -?
Show help text
.. option:: --host, -h
Hostname of MySQL server to connect to (default localhost)
.. option:: --user, -u
MySQL username with the correct privileges to execute the dump
.. option:: --password, -p
The corresponding password for the MySQL user
.. option:: --port, -P
The port for the MySQL connection.
.. note::
For localhost TCP connections use 127.0.0.1 for :option:`--host`.
.. option:: --socket, -S
The UNIX domain socket file to use for the connection
.. option:: --database, -B
Database to dump
.. option:: --tables-list, -T
A comma separated list of tables to dump
.. option:: --threads, -t
The number of threads to use for dumping data, default is 4
.. note::
Other threads are used in mydumper; this option does not control those
.. option:: --outputdir, -o
Output directory name, default is export-YYYYMMDD-HHMMSS
.. option:: --statement-size, -s
The maximum size for an insert statement before breaking into a new
statement, default 1,000,000 bytes
.. option:: --rows, -r
Split table into chunks of this many rows, default unlimited
.. option:: --compress, -c
Compress the output files
.. option:: --compress-input, -C
Use client protocol compression for connections to the MySQL server
.. option:: --build-empty-files, -e
Create empty dump files if there is no data to dump
.. option:: --regex, -x
A regular expression to match against database and table
.. option:: --ignore-engines, -i
Comma separated list of storage engines to ignore
.. option:: --no-schemas, -m
Do not dump schemas with the data
.. option:: --no-data, -d
Do not dump table data
.. option:: --triggers, -G
Dump triggers
.. option:: --events, -E
Dump events
.. option:: --routines, -R
Dump stored procedures and functions
.. option:: --long-query-guard, -l
Timeout for long query execution in seconds, default 60
.. option:: --kill-long-queries, -K
Kill long running queries instead of aborting the dump
.. option:: --version, -V
Show the program version and exit
.. option:: --verbose, -v
The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info.
Default is 2.
.. option:: --binlogs, -b
Get the binlogs from the server as well as the dump files (You need to compile with -DWITH_BINLOG=ON)
.. option:: --daemon, -D
Enable daemon mode
.. option:: --snapshot-interval, -I
Interval between each dump snapshot (in minutes), requires
:option:`--daemon`, default 60 (minutes)
.. option:: --logfile, -L
A file to log mydumper output to instead of console output. Useful for
daemon mode.
.. option:: --no-locks, -k
Do not execute the temporary shared read lock.
.. warning::
This will cause inconsistent backups.
.. option:: --[skip-]tz-utc
SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data
when a server has data in different time zones or data is being moved
between servers with different time zones. Defaults to on; use --skip-tz-utc
to disable.
.. option:: --less-locking
Minimize locking time on InnoDB tables by grabbing a LOCK TABLE ... READ
on all non-InnoDB tables.
.. option:: --chunk-filesize, -F
Split tables into chunks of this output file size. This value is in MB
.. option:: --success-on-1146
Do not increment the error count, and issue a warning instead of a critical error, when a table does not exist
.. option:: --use-savepoints
Use savepoints to reduce metadata locking issues, needs SUPER privilege

99
docs/myloader_usage.rst Normal file

@ -0,0 +1,99 @@
Myloader Usage
==============
Synopsis
--------
:program:`myloader` :option:`--directory <myloader --directory>` = /path/to/mydumper/backup [:ref:`OPTIONS <myloader-options-label>`]
Description
-----------
:program:`myloader` is a tool used for multi-threaded restoration of mydumper
backups.
.. _myloader-options-label:
Options
-------
The :program:`myloader` tool has several available options:
.. program:: myloader
.. option:: --help, -?
Show help text
.. option:: --host, -h
Hostname of MySQL server to connect to (default localhost)
.. option:: --user, -u
MySQL username with the correct privileges to execute the restoration
.. option:: --password, -p
The corresponding password for the MySQL user
.. option:: --port, -P
The port for the MySQL connection.
.. note::
For localhost TCP connections use 127.0.0.1 for :option:`--host`.
.. option:: --socket, -S
The UNIX domain socket file to use for the connection
.. option:: --threads, -t
The number of threads to use for restoring data, default is 4
.. option:: --version, -V
Show the program version and exit
.. option:: --compress-protocol, -C
Use client protocol compression for connections to the MySQL server
.. option:: --directory, -d
The directory of the mydumper backup to restore
.. option:: --database, -B
An alternative database to load the dump into
.. note::
For use with single-database dumps. When used with multi-database dumps
that contain the same table name in more than one database it may cause
errors, and the same scenario may give unpredictable results with
:option:`--overwrite-tables`.
.. option:: --source-db, -s
Database to restore; useful in combination with :option:`--database`
.. option:: --queries-per-transaction, -q
Number of INSERT queries to execute per transaction during restore, default
is 1000.
.. option:: --overwrite-tables, -o
Drop any existing tables when restoring schemas
.. option:: --enable-binlog, -e
Log the data loading in the MySQL binary log (off by default)
.. option:: --verbose, -v
The verbosity of messages. 0 = silent, 1 = errors, 2 = warnings, 3 = info.
Default is 2.
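Example
-------
As an illustration only (paths and database names are placeholders), restoring
a backup into a differently named database with eight threads while dropping
any pre-existing tables could look like::

    myloader --directory=/path/to/mydumper/backup --source-db=mydb --database=mydb_restored --overwrite-tables --threads=8 --verbose=3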

128
g_unix_signal.c Normal file
View File

@ -0,0 +1,128 @@
#define _POSIX_SOURCE
#include <signal.h>
#include <glib.h>
static GPtrArray *signal_data = NULL;
typedef struct _GUnixSignalData {
guint source_id;
GMainContext *context;
gboolean triggered;
gint signum;
} GUnixSignalData;
typedef struct _GUnixSignalSource {
GSource source;
GUnixSignalData *data;
} GUnixSignalSource;
static inline GUnixSignalData* get_signal_data(guint index)
{
return (GUnixSignalData*)g_ptr_array_index(signal_data, index);
}
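/* Signal handler: mark every registered source watching this signal as
   triggered, then re-install the handler in case the platform resets the
   disposition on delivery. */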
static void handler(gint signum) {
g_assert(signal_data != NULL);
guint i;
for (i = 0; i < signal_data->len; ++i)
if (get_signal_data(i)->signum == signum)
get_signal_data(i)->triggered = TRUE;
struct sigaction action;
action.sa_handler= handler;
sigemptyset (&action.sa_mask);
action.sa_flags = 0;
sigaction(signum, &action, NULL);
}
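/* GSource callbacks: prepare() lazily records the owning main context and
   source id, sets no poll timeout, and reports whether the signal already
   fired; check() reports readiness; dispatch() clears the flag and runs the
   user callback; finalize() resets the signal handler and drops the
   bookkeeping entry. */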
static gboolean check(GSource *source)
{
GUnixSignalSource *signal_source = (GUnixSignalSource*) source;
return signal_source->data->triggered;
}
static gboolean prepare(GSource *source, gint *timeout_)
{
GUnixSignalSource *signal_source = (GUnixSignalSource*) source;
if (signal_source->data->context == NULL) {
g_main_context_ref(signal_source->data->context = g_source_get_context(source));
signal_source->data->source_id = g_source_get_id(source);
}
*timeout_ = -1;
return signal_source->data->triggered;
}
static gboolean dispatch(GSource *source, GSourceFunc callback, gpointer user_data)
{
GUnixSignalSource *signal_source = (GUnixSignalSource*) source;
signal_source->data->triggered = FALSE;
return callback(user_data) ? TRUE : FALSE;
}
static void finalize(GSource *source)
{
GUnixSignalSource *signal_source = (GUnixSignalSource*) source;
struct sigaction action;
action.sa_handler= NULL;
sigemptyset (&action.sa_mask);
action.sa_flags = 0;
sigaction(signal_source->data->signum, &action, NULL);
g_main_context_unref(signal_source->data->context);
g_ptr_array_remove_fast(signal_data, signal_source->data);
if (signal_data->len == 0)
signal_data = (GPtrArray*) g_ptr_array_free(signal_data, TRUE);
g_free(signal_source->data);
}
static GSourceFuncs SourceFuncs =
{
.prepare = prepare,
.check = check,
.dispatch = dispatch,
.finalize = finalize,
.closure_callback = NULL, .closure_marshal = NULL
};
static void g_unix_signal_source_init(GSource *source, gint signum)
{
GUnixSignalSource *signal_source = (GUnixSignalSource *) source;
signal_source->data = g_new(GUnixSignalData, 1);
signal_source->data->triggered = FALSE;
signal_source->data->signum = signum;
signal_source->data->context = NULL;
if (signal_data == NULL)
signal_data = g_ptr_array_new();
g_ptr_array_add(signal_data, signal_source->data);
}
GSource *g_unix_signal_source_new(gint signum)
{
GSource *source = g_source_new(&SourceFuncs, sizeof(GUnixSignalSource));
g_unix_signal_source_init(source, signum);
struct sigaction action;
action.sa_handler= handler;
sigemptyset (&action.sa_mask);
action.sa_flags = 0;
sigaction(signum, &action, NULL);
return source;
}
guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify)
{
g_return_val_if_fail(function != NULL, 0);
GSource *source = g_unix_signal_source_new(signum);
if (priority != G_PRIORITY_DEFAULT)
g_source_set_priority (source, priority);
g_source_set_callback(source, function, data, notify);
guint id = g_source_attach(source, NULL);
g_source_unref(source);
return id;
}
guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data)
{
return g_unix_signal_add_full(G_PRIORITY_DEFAULT, signum, function, data, NULL);
}

10
g_unix_signal.h Normal file
View File

@ -0,0 +1,10 @@
#ifndef G_UNIX_SIGNAL_H
#define G_UNIX_SIGNAL_H
#include <glib.h>
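/* GSource-based Unix signal helpers: g_unix_signal_source_new() creates a
   source that becomes ready when the given signal is delivered; the *_add()
   variants attach it to the default main context, optionally with a custom
   priority and destroy notifier. */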
GSource *g_unix_signal_source_new(gint signum);
guint g_unix_signal_add(gint signum, GSourceFunc function, gpointer data);
guint g_unix_signal_add_full(gint priority, gint signum, GSourceFunc function, gpointer data, GDestroyNotify notify);
#endif /* G_UNIX_SIGNAL_H */

2890
mydumper.c Normal file

File diff suppressed because it is too large Load Diff

100
mydumper.h Normal file
View File

@ -0,0 +1,100 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Domas Mituzas, Facebook ( domas at fb dot com )
Mark Leith, Oracle Corporation (mark dot leith at oracle dot com)
Andrew Hutchings, SkySQL (andrew at skysql dot com)
Max Bubenick, Percona RDBA (max dot bubenick at percona dot com)
*/
#ifndef _mydumper_h
#define _mydumper_h
enum job_type { JOB_SHUTDOWN, JOB_RESTORE, JOB_DUMP, JOB_DUMP_NON_INNODB, JOB_SCHEMA, JOB_VIEW, JOB_TRIGGERS, JOB_SCHEMA_POST, JOB_BINLOG, JOB_LOCK_DUMP_NON_INNODB };
struct configuration {
char use_any_index;
GAsyncQueue* queue;
GAsyncQueue* queue_less_locking;
GAsyncQueue* ready;
GAsyncQueue* ready_less_locking;
GAsyncQueue* unlock_tables;
GMutex* mutex;
int done;
};
struct thread_data {
struct configuration *conf;
guint thread_id;
};
struct job {
enum job_type type;
void *job_data;
struct configuration *conf;
};
struct table_job {
char *database;
char *table;
char *filename;
char *where;
};
struct tables_job {
GList* table_job_list;
};
struct schema_job {
char *database;
char *table;
char *filename;
};
struct view_job {
char *database;
char *table;
char *filename;
char *filename2;
};
struct schema_post_job {
char *database;
char *filename;
};
struct restore_job {
char *database;
char *table;
char *filename;
};
struct binlog_job {
char *filename;
guint64 start_position;
guint64 stop_position;
};
struct db_table {
char* database;
char* table;
guint64 datalength;
};
struct schema_post {
char* database;
};
#endif

577
myloader.c Normal file
View File

@ -0,0 +1,577 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64
#include <mysql.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <glib/gstdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <zlib.h>
#include "common.h"
#include "myloader.h"
#include "config.h"
guint commit_count= 1000;
gchar *directory= NULL;
gboolean overwrite_tables= FALSE;
gboolean enable_binlog= FALSE;
gchar *source_db= NULL;
static GMutex *init_mutex= NULL;
guint errors= 0;
gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof);
void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema, gboolean need_use);
void *process_queue(struct thread_data *td);
void add_table(const gchar* filename, struct configuration *conf);
void add_schema(const gchar* filename, MYSQL *conn);
void restore_databases(struct configuration *conf, MYSQL *conn);
void restore_schema_view(MYSQL *conn);
void restore_schema_triggers(MYSQL *conn);
void restore_schema_post(MYSQL *conn);
void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data);
void set_verbose(guint verbosity);
void create_database(MYSQL *conn, gchar *database);
static GOptionEntry entries[] =
{
{ "directory", 'd', 0, G_OPTION_ARG_STRING, &directory, "Directory of the dump to import", NULL },
{ "queries-per-transaction", 'q', 0, G_OPTION_ARG_INT, &commit_count, "Number of queries per transaction, default 1000", NULL },
{ "overwrite-tables", 'o', 0, G_OPTION_ARG_NONE, &overwrite_tables, "Drop tables if they already exist", NULL },
{ "database", 'B', 0, G_OPTION_ARG_STRING, &db, "An alternative database to restore into", NULL },
{ "source-db", 's', 0, G_OPTION_ARG_STRING, &source_db, "Database to restore", NULL },
{ "enable-binlog", 'e', 0, G_OPTION_ARG_NONE, &enable_binlog, "Enable binary logging of the restore data", NULL },
{ NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }
};
void no_log(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) {
(void) log_domain;
(void) log_level;
(void) message;
(void) user_data;
}
void set_verbose(guint verbosity) {
switch (verbosity) {
case 0:
g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MASK), no_log, NULL);
break;
case 1:
g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_WARNING | G_LOG_LEVEL_MESSAGE), no_log, NULL);
break;
case 2:
g_log_set_handler(NULL, (GLogLevelFlags)(G_LOG_LEVEL_MESSAGE), no_log, NULL);
break;
default:
break;
}
}
int main(int argc, char *argv[]) {
struct configuration conf= { NULL, NULL, NULL, 0 };
GError *error= NULL;
GOptionContext *context;
g_thread_init(NULL);
init_mutex= g_mutex_new();
if(db == NULL && source_db != NULL){
db = g_strdup(source_db);
}
context= g_option_context_new("multi-threaded MySQL loader");
GOptionGroup *main_group= g_option_group_new("main", "Main Options", "Main Options", NULL, NULL);
g_option_group_add_entries(main_group, entries);
g_option_group_add_entries(main_group, common_entries);
g_option_context_set_main_group(context, main_group);
if (!g_option_context_parse(context, &argc, &argv, &error)) {
g_print("option parsing failed: %s, try --help\n", error->message);
exit(EXIT_FAILURE);
}
g_option_context_free(context);
if (program_version) {
g_print("myloader %s, built against MySQL %s\n", VERSION, MYSQL_SERVER_VERSION);
exit(EXIT_SUCCESS);
}
set_verbose(verbose);
if (!directory) {
g_critical("a directory needs to be specified, see --help\n");
exit(EXIT_FAILURE);
} else {
char *p= g_strdup_printf("%s/metadata", directory);
if (!g_file_test(p, G_FILE_TEST_EXISTS)) {
g_critical("the specified directory is not a mydumper backup\n");
exit(EXIT_FAILURE);
}
}
MYSQL *conn;
conn= mysql_init(NULL);
mysql_options(conn, MYSQL_READ_DEFAULT_GROUP, "myloader");
if (!mysql_real_connect(conn, hostname, username, password, NULL, port, socket_path, 0)) {
g_critical("Error connection to database: %s", mysql_error(conn));
exit(EXIT_FAILURE);
}
if (mysql_query(conn, "SET SESSION wait_timeout = 2147483")){
g_warning("Failed to increase wait_timeout: %s", mysql_error(conn));
}
if (!enable_binlog)
mysql_query(conn, "SET SQL_LOG_BIN=0");
mysql_query(conn, "/*!40014 SET FOREIGN_KEY_CHECKS=0*/");
conf.queue= g_async_queue_new();
conf.ready= g_async_queue_new();
guint n;
GThread **threads= g_new(GThread*, num_threads);
struct thread_data *td= g_new(struct thread_data, num_threads);
for (n= 0; n < num_threads; n++) {
td[n].conf= &conf;
td[n].thread_id= n+1;
threads[n]= g_thread_create((GThreadFunc)process_queue, &td[n], TRUE, NULL);
g_async_queue_pop(conf.ready);
}
g_async_queue_unref(conf.ready);
g_message("%d threads created", num_threads);
restore_databases(&conf, conn);
for (n= 0; n < num_threads; n++) {
struct job *j= g_new0(struct job, 1);
j->type = JOB_SHUTDOWN;
g_async_queue_push(conf.queue, j);
}
for (n= 0; n < num_threads; n++) {
g_thread_join(threads[n]);
}
restore_schema_post(conn);
restore_schema_view(conn);
restore_schema_triggers(conn);
g_async_queue_unref(conf.queue);
mysql_close(conn);
mysql_thread_end();
mysql_library_end();
g_free(directory);
g_free(td);
g_free(threads);
return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
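/* Scan the backup directory twice: first replay every table schema file
   (*-schema.sql), creating databases as needed, then queue the remaining
   data files as restore jobs for the worker threads. */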
void restore_databases(struct configuration *conf, MYSQL *conn) {
GError *error= NULL;
GDir* dir= g_dir_open(directory, 0, &error);
if (error) {
g_critical("cannot open directory %s, %s\n", directory, error->message);
errors++;
return;
}
const gchar* filename= NULL;
while((filename= g_dir_read_name(dir))) {
if (!source_db || g_str_has_prefix(filename, g_strdup_printf("%s.", source_db))){
if (g_strrstr(filename, "-schema.sql")) {
add_schema(filename, conn);
}
}
}
g_dir_rewind(dir);
while((filename= g_dir_read_name(dir))) {
if (!source_db || g_str_has_prefix(filename, g_strdup_printf("%s.", source_db))){
if (!g_strrstr(filename, "-schema.sql")
&& !g_strrstr(filename, "-schema-view.sql")
&& !g_strrstr(filename, "-schema-triggers.sql")
&& !g_strrstr(filename, "-schema-post.sql")
&& !g_strrstr(filename, "-schema-create.sql")
&& g_strrstr(filename, ".sql")) {
add_table(filename, conf);
}
}
}
g_dir_close(dir);
}
void restore_schema_view(MYSQL *conn){
GError *error= NULL;
GDir* dir= g_dir_open(directory, 0, &error);
if (error) {
g_critical("cannot open directory %s, %s\n", directory, error->message);
errors++;
return;
}
const gchar* filename= NULL;
while((filename= g_dir_read_name(dir))) {
if (!source_db || g_str_has_prefix(filename, source_db)){
if (g_strrstr(filename, "-schema-view.sql")) {
add_schema(filename, conn);
}
}
}
g_dir_close(dir);
}
void restore_schema_triggers(MYSQL *conn){
GError *error= NULL;
GDir* dir= g_dir_open(directory, 0, &error);
gchar** split_file= NULL;
gchar* database=NULL;
gchar** split_table= NULL;
gchar* table= NULL;
if (error) {
g_critical("cannot open directory %s, %s\n", directory, error->message);
errors++;
return;
}
const gchar* filename= NULL;
while((filename= g_dir_read_name(dir))) {
if (!source_db || g_str_has_prefix(filename, source_db)){
if (g_strrstr(filename, "-schema-triggers.sql")) {
split_file= g_strsplit(filename, ".", 0);
database= split_file[0];
split_table= g_strsplit(split_file[1], "-schema", 0);
table= split_table[0];
g_message("Restoring triggers for `%s`.`%s`", db ? db : database, table);
restore_data(conn, database, table, filename, TRUE, TRUE);
}
}
}
g_strfreev(split_table);
g_strfreev(split_file);
g_dir_close(dir);
}
void restore_schema_post(MYSQL *conn){
GError *error= NULL;
GDir* dir= g_dir_open(directory, 0, &error);
gchar** split_file= NULL;
gchar* database=NULL;
//gchar* table=NULL;
if (error) {
g_critical("cannot open directory %s, %s\n", directory, error->message);
errors++;
return;
}
const gchar* filename= NULL;
while((filename= g_dir_read_name(dir))) {
if (!source_db || g_str_has_prefix(filename, source_db)){
if (g_strrstr(filename, "-schema-post.sql")) {
split_file= g_strsplit(filename, "-schema-post.sql", 0);
database= split_file[0];
//table= split_file[0]; //NULL
g_message("Restoring routines and events for `%s`", db ? db : database);
restore_data(conn, database, NULL, filename, TRUE, TRUE);
}
}
}
g_strfreev(split_file);
g_dir_close(dir);
}
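/* Create the target database: when restoring into the original database name,
   replay the dumped *-schema-create.sql[.gz] file if present; otherwise fall
   back to a plain CREATE DATABASE. */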
void create_database(MYSQL *conn, gchar *database){
gchar* query = NULL;
if((db == NULL && source_db == NULL) || (db != NULL && source_db != NULL && !g_ascii_strcasecmp(db, source_db))){
const gchar* filename= g_strdup_printf("%s-schema-create.sql", db ? db : database);
const gchar* filenamegz= g_strdup_printf("%s-schema-create.sql.gz", db ? db : database);
if (g_file_test (filename, G_FILE_TEST_EXISTS)){
restore_data(conn, database, NULL, filename, TRUE, FALSE);
}else if (g_file_test (filenamegz, G_FILE_TEST_EXISTS)){
restore_data(conn, database, NULL, filenamegz, TRUE, FALSE);
}else{
query= g_strdup_printf("CREATE DATABASE `%s`", db ? db : database);
mysql_query(conn, query);
}
}else{
query= g_strdup_printf("CREATE DATABASE `%s`", db ? db : database);
mysql_query(conn, query);
}
g_free(query);
return;
}
void add_schema(const gchar* filename, MYSQL *conn) {
// 0 is database, 1 is table with -schema on the end
gchar** split_file= g_strsplit(filename, ".", 0);
gchar* database= split_file[0];
// Remove the -schema from the table name
gchar** split_table= g_strsplit(split_file[1], "-schema", 0);
gchar* table= split_table[0];
gchar* query= g_strdup_printf("SHOW CREATE DATABASE `%s`", db ? db : database);
if (mysql_query(conn, query)) {
g_message("Creating database `%s`", db ? db : database);
create_database(conn, database);
} else {
MYSQL_RES *result= mysql_store_result(conn);
// In drizzle the query succeeds with no rows
my_ulonglong row_count= mysql_num_rows(result);
mysql_free_result(result);
if (row_count == 0) {
create_database(conn, database);
}
}
if (overwrite_tables) {
g_message("Dropping table or view (if exists) `%s`.`%s`", db ? db : database, table);
query= g_strdup_printf("DROP TABLE IF EXISTS `%s`.`%s`", db ? db : database, table);
mysql_query(conn, query);
query= g_strdup_printf("DROP VIEW IF EXISTS `%s`.`%s`", db ? db : database, table);
mysql_query(conn, query);
}
g_free(query);
g_message("Creating table `%s`.`%s`", db ? db : database, table);
restore_data(conn, database, table, filename, TRUE, TRUE);
g_strfreev(split_table);
g_strfreev(split_file);
return;
}
void add_table(const gchar* filename, struct configuration *conf) {
struct job *j= g_new0(struct job, 1);
struct restore_job *rj= g_new(struct restore_job, 1);
j->job_data= (void*) rj;
rj->filename= g_strdup(filename);
j->type= JOB_RESTORE;
gchar** split_file= g_strsplit(filename, ".", 0);
rj->database= g_strdup(split_file[0]);
rj->table= g_strdup(split_file[1]);
rj->part= g_ascii_strtoull(split_file[2], NULL, 10);
g_async_queue_push(conf->queue, j);
return;
}
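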
void *process_queue(struct thread_data *td) {
struct configuration *conf= td->conf;
g_mutex_lock(init_mutex);
MYSQL *thrconn= mysql_init(NULL);
g_mutex_unlock(init_mutex);
mysql_options(thrconn, MYSQL_READ_DEFAULT_GROUP, "myloader");
if (compress_protocol)
mysql_options(thrconn, MYSQL_OPT_COMPRESS, NULL);
if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port, socket_path, 0)) {
g_critical("Failed to connect to MySQL server: %s", mysql_error(thrconn));
exit(EXIT_FAILURE);
}
if (mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")){
g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn));
}
if (!enable_binlog)
mysql_query(thrconn, "SET SQL_LOG_BIN=0");
mysql_query(thrconn, "/*!40101 SET NAMES binary*/");
mysql_query(thrconn, "/*!40101 SET SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */");
mysql_query(thrconn, "/*!40014 SET UNIQUE_CHECKS=0 */");
mysql_query(thrconn, "SET autocommit=0");
g_async_queue_push(conf->ready, GINT_TO_POINTER(1));
struct job* job= NULL;
struct restore_job* rj= NULL;
for(;;) {
job= (struct job*)g_async_queue_pop(conf->queue);
switch (job->type) {
case JOB_RESTORE:
rj= (struct restore_job *)job->job_data;
g_message("Thread %d restoring `%s`.`%s` part %d", td->thread_id, rj->database, rj->table, rj->part);
restore_data(thrconn, rj->database, rj->table, rj->filename, FALSE, TRUE);
if (rj->database) g_free(rj->database);
if (rj->table) g_free(rj->table);
if (rj->filename) g_free(rj->filename);
g_free(rj);
g_free(job);
break;
case JOB_SHUTDOWN:
g_message("Thread %d shutting down", td->thread_id);
if (thrconn)
mysql_close(thrconn);
g_free(job);
mysql_thread_end();
return NULL;
break;
default:
g_critical("Something very bad happened!");
exit(EXIT_FAILURE);
}
}
if (thrconn)
mysql_close(thrconn);
mysql_thread_end();
return NULL;
}
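/* Stream a plain or gzip-compressed SQL file and execute it statement by
   statement; data files are wrapped in transactions and committed every
   commit_count queries, while schema files are executed outside a
   transaction. */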
void restore_data(MYSQL *conn, char *database, char *table, const char *filename, gboolean is_schema, gboolean need_use) {
void *infile;
gboolean is_compressed= FALSE;
gboolean eof= FALSE;
guint query_counter= 0;
GString *data= g_string_sized_new(512);
gchar* path= g_build_filename(directory, filename, NULL);
if (!g_str_has_suffix(path, ".gz")) {
infile= g_fopen(path, "r");
is_compressed= FALSE;
} else {
infile= (void*) gzopen(path, "r");
is_compressed= TRUE;
}
if (!infile) {
g_critical("cannot open file %s (%d)", filename, errno);
errors++;
return;
}
if(need_use){
gchar *query= g_strdup_printf("USE `%s`", db ? db : database);
if (mysql_query(conn, query)) {
g_critical("Error switching to database %s whilst restoring table %s", db ? db : database, table);
g_free(query);
errors++;
return;
}
g_free(query);
}
if (!is_schema)
mysql_query(conn, "START TRANSACTION");
while (eof == FALSE) {
if (read_data(infile, is_compressed, data, &eof)) {
// Search for ; in last 5 chars of line
if (g_strrstr(&data->str[data->len >= 5 ? data->len - 5 : 0], ";\n")) {
if (mysql_real_query(conn, data->str, data->len)) {
g_critical("Error restoring %s.%s from file %s: %s", db ? db : database, table, filename, mysql_error(conn));
errors++;
return;
}
query_counter++;
if (!is_schema &&(query_counter == commit_count)) {
query_counter= 0;
if (mysql_query(conn, "COMMIT")) {
g_critical("Error committing data for %s.%s: %s", db ? db : database, table, mysql_error(conn));
errors++;
return;
}
mysql_query(conn, "START TRANSACTION");
}
g_string_set_size(data, 0);
}
} else {
g_critical("error reading file %s (%d)", filename, errno);
errors++;
return;
}
}
if (!is_schema && mysql_query(conn, "COMMIT")) {
g_critical("Error committing data for %s.%s from file %s: %s", db ? db : database, table, filename, mysql_error(conn));
errors++;
}
g_string_free(data, TRUE);
g_free(path);
if (!is_compressed) {
fclose(infile);
} else {
gzclose((gzFile)infile);
}
return;
}
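/* Append the next chunk (up to 255 bytes) of the plain or gzip-compressed
   file to data; sets *eof at end of file and returns FALSE on a read error. */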
gboolean read_data(FILE *file, gboolean is_compressed, GString *data, gboolean *eof) {
char buffer[256];
do {
if (!is_compressed) {
if (fgets(buffer, 256, file) == NULL) {
if (feof(file)) {
*eof= TRUE;
buffer[0]= '\0';
} else {
return FALSE;
}
}
} else {
if (!gzgets((gzFile)file, buffer, 256)) {
if (gzeof((gzFile)file)) {
*eof= TRUE;
buffer[0]= '\0';
} else {
return FALSE;
}
}
}
g_string_append(data, buffer);
} while ((buffer[strlen(buffer)] != '\0') && *eof == FALSE);
return TRUE;
}

51
myloader.h Normal file
View File

@ -0,0 +1,51 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Domas Mituzas, Facebook ( domas at fb dot com )
Mark Leith, Oracle Corporation (mark dot leith at oracle dot com)
Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#ifndef _myloader_h
#define _myloader_h
enum job_type { JOB_SHUTDOWN, JOB_RESTORE };
struct configuration {
GAsyncQueue* queue;
GAsyncQueue* ready;
GMutex* mutex;
int done;
};
struct thread_data {
struct configuration *conf;
guint thread_id;
};
struct job {
enum job_type type;
void *job_data;
struct configuration *conf;
};
struct restore_job {
char *database;
char *table;
char *filename;
guint part;
};
#endif

71
server_detect.c Normal file
View File

@ -0,0 +1,71 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#include <pcre.h>
#include <glib.h>
#include <string.h>
#include "server_detect.h"
int detect_server(MYSQL *conn) {
pcre *re= NULL;
const char *error;
int erroroffset;
int ovector[9]= {0};
int rc;
const char* db_version= mysql_get_server_info(conn);
re= pcre_compile(DETECT_MYSQL_REGEX, 0, &error, &erroroffset, NULL);
if (!re) {
g_critical("Regular expression fail: %s", error);
exit(EXIT_FAILURE);
}
rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9);
pcre_free(re);
if (rc > 0) {
return SERVER_TYPE_MYSQL;
}
re= pcre_compile(DETECT_DRIZZLE_REGEX, 0, &error, &erroroffset, NULL);
if (!re) {
g_critical("Regular expression fail: %s", error);
exit(EXIT_FAILURE);
}
rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9);
pcre_free(re);
if (rc > 0) {
return SERVER_TYPE_DRIZZLE;
}
re= pcre_compile(DETECT_MARIADB_REGEX, 0, &error, &erroroffset, NULL);
if (!re) {
g_critical("Regular expression fail: %s", error);
exit(EXIT_FAILURE);
}
rc = pcre_exec(re, NULL, db_version, strlen(db_version), 0, 0, ovector, 9);
pcre_free(re);
if (rc > 0) {
return SERVER_TYPE_MYSQL;
}
return SERVER_TYPE_UNKNOWN;
}

28
server_detect.h Normal file
View File

@ -0,0 +1,28 @@
/*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Andrew Hutchings, SkySQL (andrew at skysql dot com)
*/
#ifndef _server_detect_h
#define _server_detect_h
#include <mysql.h>
#define DETECT_MYSQL_REGEX "^([3-9]\\.[0-9]+\\.[0-9]+)"
#define DETECT_DRIZZLE_REGEX "^(20[0-9]{2}\\.(0[1-9]|1[012])\\.[0-9]+)"
#define DETECT_MARIADB_REGEX "^([0-9]{1,2}\\.[0-9]+\\.[0-9]+)"
enum server_type { SERVER_TYPE_UNKNOWN, SERVER_TYPE_MYSQL, SERVER_TYPE_DRIZZLE };
int detect_server(MYSQL *conn);
#endif