From 597b0ff2e2d2e3517c79b6841a4e17483ad239bb Mon Sep 17 00:00:00 2001
From: Ralf Becker
Date: Fri, 11 Jul 2014 17:36:27 +0000
Subject: [PATCH] * Backup: backup could contain rows multiple times (which caused restore to fail)

---
 phpgwapi/inc/class.db_backup.inc.php | 38 ++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 10 deletions(-)

diff --git a/phpgwapi/inc/class.db_backup.inc.php b/phpgwapi/inc/class.db_backup.inc.php
index 900bc0ffb3..c33fded96d 100644
--- a/phpgwapi/inc/class.db_backup.inc.php
+++ b/phpgwapi/inc/class.db_backup.inc.php
@@ -7,7 +7,7 @@
  * @package api
  * @subpackage db
  * @author Ralf Becker
- * @copyright (c) 2003-12 by Ralf Becker
+ * @copyright (c) 2003-14 by Ralf Becker
  * @version $Id$
  */
 
@@ -79,6 +79,12 @@ class db_backup
 	 * @var boolean
 	 */
 	var $backup_files = false ;
+	/**
+	 * Reference to schema_proc's egw_db object
+	 *
+	 * @var egw_db
+	 */
+	var $db;
 
 	/**
 	 * Constructor
@@ -278,7 +284,7 @@ class db_backup
 		/* Sort the files by ctime. */
 		krsort($files);
 		$count = 0;
-		foreach($files as $ctime => $file)
+		foreach($files as $file)
 		{
 			if ($count >= $this->backup_mincount)//
 			{
@@ -411,6 +417,7 @@ class db_backup
 		}
 		$table = False;
 		$n = 0;
+		$rows = array();
 		while(!feof($f))
 		{
 			$line = trim(fgets($f)); ++$n;
@@ -620,7 +627,6 @@ class db_backup
 		if($type == 'zip')
 		{
 			fclose($f);
-			$f = $save_f;
 			unlink($name);
 			rmdir($dir.'/database_backup');
 		}
@@ -723,7 +729,7 @@ class db_backup
 		$str_pending = False;
 		$n = 0;
 
-		foreach($fields as $i => $field)
+		foreach($fields as $field)
 		{
 			if ($str_pending !== False)
 			{
@@ -839,12 +845,24 @@ class db_backup
 		{
 			if (in_array($table,$this->exclude_tables)) continue;	// dont backup
 
-			$total = 0;
+			// do we have a primary key?
+			// --> use it to order and limit rows, to cope with rows being added during backup
+			// otherwise new rows can cause rows being backed up twice and
+			// backups don't restore because of duplicate keys
+			$pk = $schema['pk'] && count($schema['pk']) == 1 ? $schema['pk'][0] : null;
+
+			$total = $max = 0;
 			do {
 				$num_rows = 0;
-				// querying only chunks for 100 rows, to not run into memory limit on huge tables
-				foreach($this->db->select($table, '*', false, __LINE__, __FILE__, $total, '', false, self::ROW_CHUNK) as $row)
+				// querying only chunks for 10000 rows, to not run into memory limit on huge tables
+				foreach($this->db->select($table, '*',
+					empty($pk) ? false : $pk.' > '.$max,	// limit by maximum primary key already received
+					__LINE__, __FILE__,
+					empty($pk) ? $total : 0,	// if no primary key, limit by number of received rows
+					empty($pk) ? '' : 'ORDER BY '.$pk.' ASC',	// order by primary key
+					false, self::ROW_CHUNK) as $row)
 				{
+					if (!empty($pk)) $max = $row[$pk];
 					if ($total === 0) fwrite($f,"\ntable: $table\n".implode(',',array_keys($row))."\n");
 
 					array_walk($row,array('db_backup','escape_data'),$schema['fd']);
@@ -870,14 +888,14 @@ class db_backup
 		//echo $name.'<br>';
 		$zip->addFile($name, 'database_backup/'.basename($name));
 		$count = 1;
-		foreach($file_list as $num => $file)
+		foreach($file_list as $file)
 		{
 			//echo substr($file,strlen($dir)+1).'<br>';
 			//echo $file.'<br>';
 			$zip->addFile($file,substr($file,strlen($dir)+1));//,substr($file);
 			if(($count++) == 100) { // the file descriptor limit
 				$zip->close();
-				if($zip = new ZipArchive()) {
+				if(($zip = new ZipArchive())) {
 					$zip->open($filename);
 					$count =0;
 				}
@@ -1009,7 +1027,7 @@ class db_backup
 		}
 		else
 		{
-			if (!$only_vals && $key === 'nullable')
+			if ($key === 'nullable')
			{
 				$def .= $val ? 'True' : 'False';
 			}
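
For context, the following standalone sketch (not part of the patch, and not using EGroupware's egw_db API) illustrates the keyset-pagination idea the commit introduces in db_backup::backup(): each chunk is ordered by the single-column primary key and starts strictly after the highest key already written, so rows inserted while the backup runs can never be returned twice. The PDO connection, table name "egw_example", key column "example_id" and the missing value escaping are assumptions made only for this example.

<?php
// Illustrative sketch of keyset-paginated table dumping (assumptions: plain PDO
// instead of egw_db, a numeric auto-increment primary key, hypothetical names,
// and no value escaping unlike the real escape_data()).

const ROW_CHUNK = 10000;	// rows per chunk, mirrors db_backup::ROW_CHUNK

function dump_table(PDO $pdo, $table, $pk, $out)
{
	$max = 0;	// highest primary key written so far
	$total = 0;	// total rows written
	do {
		// fetch the next chunk strictly above the last seen key, ordered by that key,
		// so rows inserted while the backup is running cannot be emitted a second time
		$stmt = $pdo->prepare("SELECT * FROM $table WHERE $pk > :max ORDER BY $pk ASC LIMIT ".ROW_CHUNK);
		$stmt->execute(array(':max' => $max));

		$num_rows = 0;
		while (($row = $stmt->fetch(PDO::FETCH_ASSOC)))
		{
			$max = $row[$pk];	// remember the key for the next chunk
			if ($total === 0) fwrite($out, implode(',', array_keys($row))."\n");	// header line
			fwrite($out, implode(',', $row)."\n");
			++$total;
			++$num_rows;
		}
	}
	while ($num_rows == ROW_CHUNK);	// a short chunk means the table is exhausted

	return $total;
}

// Hypothetical usage:
// $pdo = new PDO('mysql:host=localhost;dbname=egroupware', 'user', 'password');
// dump_table($pdo, 'egw_example', 'example_id', fopen('backup.csv', 'w'));

Compared with the previous offset-based chunking ($total used as OFFSET), this approach stays correct under concurrent inserts and also avoids the increasing cost of large offsets on big tables; it only applies when the table has a single-column primary key, which is why the patch keeps the offset path as a fallback.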